import matplotlib.pyplot as plt from torchinfo import summary import torch.nn as nn from tqdm import * # tqdm用于显示进度条并评估任务时间开销 import numpy as np import sys
# Create a list of intermediate layers layers = [] for i inrange(len(hidden_sizes) - 1): layers.append(nn.Linear(hidden_sizes[i], hidden_sizes[i+1])) layers.append(nn.ReLU()) # Convert the list of layers into a Sequential module self.hidden_layers = nn.Sequential(*layers)
def forward(self, x):
    """MLP forward pass: input layer, ReLU, optional hidden stack, output layer.

    Args:
        x: input tensor accepted by ``self.fcin``.

    Returns:
        Output tensor produced by ``self.fcout``.
    """
    h = self.relu(self.fcin(x))
    # The Sequential may be empty (no intermediate layers configured);
    # an empty Sequential is falsy via len(), so skip it in that case.
    if len(self.hidden_layers):
        h = self.hidden_layers(h)
    return self.fcout(h)
def evaluate(model, data_loader):
    """Compute classification accuracy of `model` over `data_loader`.

    Args:
        model: module mapping a (batch, features) tensor to class logits.
        data_loader: iterable yielding (x, y) batches; x may have any
            trailing shape (it is flattened per sample), y holds class ids.

    Returns:
        float in [0, 1]: fraction of correctly classified samples
        (0.0 if the loader is empty).
    """
    model.eval()  # evaluation mode: disables dropout / batch-norm updates
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for x, y in data_loader:
            # Flatten each sample from its own shape instead of relying on a
            # module-level `input_size` global; equivalent to the original
            # x.view(-1, input_size) for (batch, ...) input.
            x = x.view(x.size(0), -1)
            logits = model(x)
            # torch.max over dim 1 returns (values, indices);
            # the indices are the predicted class ids.
            _, predicted = torch.max(logits.data, 1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    # Guard against an empty data_loader (would divide by zero).
    return correct / total if total else 0.0
def forward(self, x):
    """LeNet-style forward pass for (batch, 1, 28, 28) inputs."""
    # conv1: (1,28,28) -> (6,24,24), since (28 - 5 + 2*0)/1 + 1 = 24;
    # ReLU then 2x2 max pool -> (6,12,12)
    out = nn.functional.max_pool2d(torch.relu(self.conv1(x)), kernel_size=2)
    # conv2: (6,12,12) -> (16,8,8), since (12 - 5 + 2*0)/1 + 1 = 8;
    # ReLU then 2x2 max pool -> (16,4,4)
    out = nn.functional.max_pool2d(torch.relu(self.conv2(out)), kernel_size=2)
    # Flatten the feature map to (batch, 256) for the fully connected head
    out = out.view(-1, 16 * 4 * 4)
    out = torch.relu(self.fc1(out))
    out = torch.relu(self.fc2(out))
    return self.fc3(out)

# Inspect the model structure and parameter counts;
# input_size describes an example input tensor shape.
print(summary(SimpleCnnNet(), input_size=(1, 1, 28, 28)))
# 导入必要的库 import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from torchvision import datasets, transforms from tqdm import * # tqdm用于显示进度条并评估任务时间开销 import numpy as np import sys
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import Dataset, DataLoader from torchvision import transforms
def forward(self, x):
    """Two conv+ReLU+pool stages, flatten, then a two-layer classifier head."""
    out = self.pool(F.relu(self.conv1(x)))
    out = self.pool(F.relu(self.conv2(out)))
    # Flatten the pooled feature map (expected 64 x 50 x 50 per sample)
    # into a (batch, 160000) tensor for the linear layers.
    flat = out.view(-1, 64 * 50 * 50)
    hidden = F.relu(self.fc1(flat))
    return self.fc2(hidden)
# Training method deftrain(model, train_loader, val_loader, criterion, optimizer, num_epochs): model.train() for epoch inrange(num_epochs): running_loss = 0.0 for _data, _labels in train_loader: # Zero the parameter gradients optimizer.zero_grad()
# Validation method defvalidate(model, val_loader, criterion): model.eval() # Set the model to evaluation mode val_loss = 0.0 correct = 0 total = 0 with torch.no_grad(): for _data, _labels in val_loader:
# Generate test data test_data, test_labels = generate_dataset(num_samples=100, img_size=200)
# Create the test dataset and data loader test_dataset = ShapeDataset(test_data, test_labels, transform=transform) test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
# Evaluate the trained model on the held-out test set.
model.eval()  # Set the model to evaluation mode
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in test_loader:
        # Ensure inputs have the correct shape
        inputs, labels = inputs.float(), labels
        outputs = model(inputs)
        # argmax over the class dimension — same result as torch.max(..., 1)[1]
        predicted = outputs.argmax(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
# Report accuracy as a percentage. `correct / total` is a fraction in
# [0, 1], so it must be scaled by 100 before printing with a '%' sign —
# the original printed "0.90%" for 90% accuracy.
print(f'Accuracy of the model on the test images: {100 * correct / total:.2f}%')
'''
Accuracy of the model on the test images: 90.00%
'''
# Visualize some test samples and their predictions
def visualize_predictions(inputs, labels, predictions):
    """Plot the first five samples in grayscale, titled with true/predicted labels.

    Args:
        inputs: indexable batch of image tensors/arrays (squeezed to 2-D for display).
        labels: indexable ground-truth labels.
        predictions: indexable predicted labels.
    """
    fig, axes = plt.subplots(1, 5, figsize=(15, 5))
    # Iterate the axes directly instead of indexing by position.
    for i, ax in enumerate(axes):
        ax.imshow(inputs[i].squeeze(), cmap='gray')
        ax.set_title(f'True: {labels[i]}, Pred: {predictions[i]}')
        ax.axis('off')
    plt.show()
# Generate test data visual_data, visual_labels = generate_dataset(num_samples=5, img_size=200)
# Create the test dataset and data loader visual_dataset = ShapeDataset(visual_data, visual_labels, transform=transform) visual_loader = DataLoader(visual_dataset, batch_size=5, shuffle=False)