| import torch |
| import torchvision |
| import torchvision.transforms as transforms |
| import os |
| import numpy as np |
| import random |
| import yaml |
| from torch.utils.data import TensorDataset, DataLoader |
|
|
| |
|
|
def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
    """Build train/test DataLoaders for CIFAR-10.

    Args:
        batch_size: Samples per batch for both loaders.
        num_workers: Number of worker processes used by each DataLoader.
        local_dataset_path: If given, use the dataset at this directory
            (downloading only when the extracted 'cifar-10-batches-py'
            folder is missing or empty); otherwise download to '../dataset'.
        shuffle: Whether to shuffle batches. NOTE(review): this is applied
            to BOTH the train and the test loader.

    Returns:
        (trainloader, testloader): DataLoaders over the train/test splits.
    """
    # Standard CIFAR-10 augmentation plus per-channel normalization for training.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # Test images get only tensor conversion and the same normalization.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    if local_dataset_path:
        print(f"使用本地数据集: {local_dataset_path}")
        # Download only when the extracted batch directory is absent or empty.
        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
        dataset_path = local_dataset_path
    else:
        print("未指定本地数据集路径,将下载数据集")
        download = True
        dataset_path = '../dataset'

    # exist_ok avoids a race if another process creates the directory first.
    os.makedirs(dataset_path, exist_ok=True)

    trainset = torchvision.datasets.CIFAR10(
        root=dataset_path, train=True, download=download, transform=transform_train)
    trainloader = DataLoader(
        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)

    testset = torchvision.datasets.CIFAR10(
        root=dataset_path, train=False, download=download, transform=transform_test)
    testloader = DataLoader(
        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)

    return trainloader, testloader
|
|
| def get_noisy_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False): |
| """获取添加噪声后的CIFAR10数据集的数据加载器 |
| |
| Args: |
| batch_size: 批次大小 |
| num_workers: 数据加载的工作进程数 |
| local_dataset_path: 本地数据集路径,如果提供则使用本地数据集,否则下载 |
| shuffle: 是否打乱数据 |
| |
| Returns: |
| noisy_trainloader: 添加噪声后的训练数据加载器 |
| testloader: 正常测试数据加载器 |
| """ |
| |
| trainloader, testloader = get_cifar10_dataloaders( |
| batch_size=batch_size, |
| num_workers=num_workers, |
| local_dataset_path=local_dataset_path, |
| shuffle=False |
| ) |
| |
| |
| device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') |
| print(f"使用设备: {device}") |
| |
| |
| config_path = './train.yaml' |
| try: |
| with open(config_path, 'r') as f: |
| config = yaml.safe_load(f) |
| except FileNotFoundError: |
| print(f"找不到配置文件: {config_path},使用默认配置") |
| config = { |
| 'noise_levels': { |
| 'gaussian': [0.1, 0.3], |
| 'salt_pepper': [0.05, 0.1], |
| 'poisson': [1.0] |
| } |
| } |
| |
| |
| noise_levels = config.get('noise_levels', {}) |
| gaussian_level = noise_levels.get('gaussian', [0.1, 0.2]) |
| salt_pepper_level = noise_levels.get('salt_pepper', [0.05, 0.1]) |
| poisson_level = noise_levels.get('poisson', [1.0])[0] |
| |
| |
| data_list = [] |
| targets_list = [] |
| |
| for inputs, targets in trainloader: |
| data_list.append(inputs) |
| targets_list.append(targets) |
| |
| |
| all_data = torch.cat(data_list) |
| all_targets = torch.cat(targets_list) |
| |
| |
| noise_info = { |
| 'noise_types': [], |
| 'noise_levels': [], |
| 'noise_indices': [] |
| } |
| |
| |
| mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1).to(device) |
| std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1).to(device) |
| |
| print("开始添加噪声...") |
| |
| |
| for label_value in range(10): |
| |
| indices = [i for i in range(len(all_targets)) if all_targets[i].item() == label_value] |
| |
| noise_type = None |
| noise_ratio = 0.0 |
| level = None |
| |
| |
| if label_value == 2: |
| noise_type = 1 |
| noise_ratio = 0.3 |
| level = gaussian_level[1] if len(gaussian_level) > 1 else gaussian_level[0] |
| elif label_value == 3: |
| noise_type = 1 |
| noise_ratio = 0.1 |
| level = gaussian_level[0] |
| elif label_value == 4: |
| noise_type = 2 |
| noise_ratio = 0.3 |
| level = salt_pepper_level[1] if len(salt_pepper_level) > 1 else salt_pepper_level[0] |
| elif label_value == 5: |
| noise_type = 2 |
| noise_ratio = 0.1 |
| level = salt_pepper_level[0] |
| elif label_value == 6: |
| noise_type = 3 |
| noise_ratio = 0.3 |
| level = poisson_level |
| elif label_value == 7: |
| noise_type = 3 |
| noise_ratio = 0.1 |
| level = poisson_level |
| |
| |
| if noise_type is not None and level is not None and noise_ratio > 0: |
| |
| num_samples_to_add_noise = int(len(indices) * noise_ratio) |
| if num_samples_to_add_noise == 0 and len(indices) > 0: |
| num_samples_to_add_noise = 1 |
| |
| |
| indices_to_add_noise = random.sample(indices, min(num_samples_to_add_noise, len(indices))) |
| |
| print(f"标签 {label_value}: 为 {len(indices_to_add_noise)}/{len(indices)} 个样本添加噪声类型 {noise_type},强度 {level}") |
| |
| |
| for i in indices_to_add_noise: |
| |
| img = all_data[i].to(device) |
| |
| |
| img_denorm = img * std + mean |
| |
| |
| if noise_type == 1: |
| |
| img_np = img_denorm.cpu().numpy() |
| img_np = np.transpose(img_np, (1, 2, 0)) |
| img_np = np.clip(img_np, 0, 1) * 255.0 |
| |
| |
| std_dev = level * 25 |
| noise = np.random.normal(0, std_dev, img_np.shape) |
| noisy_img = img_np + noise |
| noisy_img = np.clip(noisy_img, 0, 255) |
| |
| |
| noisy_img = noisy_img / 255.0 |
| noisy_img = np.transpose(noisy_img, (2, 0, 1)) |
| noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device) |
| |
| elif noise_type == 2: |
| |
| img_np = img_denorm.cpu().numpy() |
| img_np = np.transpose(img_np, (1, 2, 0)) |
| img_np = np.clip(img_np, 0, 1) * 255.0 |
| |
| |
| mask = np.random.random(img_np.shape[:2]) |
| |
| img_np_copy = img_np.copy() |
| img_np_copy[mask < level/2] = 0 |
| |
| img_np_copy[mask > 1 - level/2] = 255 |
| |
| |
| noisy_img = img_np_copy / 255.0 |
| noisy_img = np.transpose(noisy_img, (2, 0, 1)) |
| noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device) |
| |
| elif noise_type == 3: |
| |
| img_np = img_denorm.cpu().numpy() |
| img_np = np.transpose(img_np, (1, 2, 0)) |
| img_np = np.clip(img_np, 0, 1) * 255.0 |
| |
| |
| lam = np.maximum(img_np / 255.0 * 10.0, 0.0001) |
| noisy_img = np.random.poisson(lam) / 10.0 * 255.0 |
| noisy_img = np.clip(noisy_img, 0, 255) |
| |
| |
| noisy_img = noisy_img / 255.0 |
| noisy_img = np.transpose(noisy_img, (2, 0, 1)) |
| noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device) |
| |
| |
| noisy_tensor_norm = (noisy_tensor - mean) / std |
| |
| |
| all_data[i] = noisy_tensor_norm |
| |
| |
| noise_info['noise_types'].append(noise_type) |
| noise_info['noise_levels'].append(level) |
| noise_info['noise_indices'].append(i) |
| |
| |
| noise_indices = sorted(noise_info['noise_indices']) |
| noise_index_path = os.path.join('..', 'dataset', 'noise_index.npy') |
| os.makedirs(os.path.dirname(noise_index_path), exist_ok=True) |
| np.save(noise_index_path, noise_indices) |
| print(f"已保存噪声样本索引到 {noise_index_path},共 {len(noise_indices)} 个样本") |
| |
| |
| noisy_dataset = TensorDataset(all_data, all_targets) |
| |
| |
| noisy_trainloader = DataLoader( |
| noisy_dataset, |
| batch_size=batch_size, |
| shuffle=shuffle, |
| num_workers=num_workers |
| ) |
| |
| print(f"成功为{len(noise_info['noise_indices'])}/{len(all_data)} ({len(noise_info['noise_indices'])/len(all_data)*100:.1f}%)的样本添加噪声") |
| return noisy_trainloader, testloader |