반응형
Implementation of neural networks - ASL
Data preparation
# Load the Sign Language MNIST splits and split labels off from pixels.
import pandas as pd
import numpy as np

train_df = pd.read_csv("data/sign_mnist_train.csv")
valid_df = pd.read_csv("data/sign_mnist_valid.csv")
train_df.head()

# pop() removes the 'label' column and hands it back in one step,
# leaving only the 784 pixel columns in each frame.
y_train = train_df.pop('label')
y_valid = valid_df.pop('label')
x_train = train_df.values
x_valid = valid_df.values

print(x_train.shape)
print(x_valid.shape)
print(y_train.shape)
print(y_train[:5])
'''
(27455, 784)
(7172, 784)
(27455,)
0 3
1 6
2 2
3 2
4 12
Name: label, dtype: int64
'''
# Preview the first 20 training images alongside their numeric labels.
import matplotlib.pyplot as plt

plt.figure(figsize=(40, 40))
num_images = 20
for idx in range(num_images):
    pixels = x_train[idx]
    image_label = y_train[idx]
    plt.subplot(1, num_images, idx + 1)
    plt.title(image_label, fontdict={'fontsize': 30})
    plt.axis('off')
    # Each row is a flattened 28x28 grayscale image (see (27455, 784) above).
    plt.imshow(pixels.reshape(28, 28), cmap='gray')
import torch
from torch.utils.data import Dataset, DataLoader
class ASL_Dataset(Dataset):
    """Sign Language MNIST dataset backed by a CSV file.

    Each CSV row holds a 'label' column plus 784 pixel values (one
    flattened 28x28 grayscale image). Pixels are scaled to [0, 1].
    """

    def __init__(self, path):
        super().__init__()
        # Neutral name: this class loads train OR validation CSVs.
        df = pd.read_csv(path)
        # pop() removes the label column and returns it in one step.
        labels = df.pop('label').values
        # .values is already an ndarray; the original wrapped it in a
        # redundant np.array() copy.
        pixels = df.values / 255.0  # scale uint8 pixels into [0, 1]
        # Attribute names are kept: callers read x_train_tensor /
        # y_train_tensor directly for evaluation.
        self.x_train_tensor = torch.tensor(pixels, dtype=torch.float32)
        # CrossEntropyLoss expects integer class indices (int64).
        self.y_train_tensor = torch.tensor(labels, dtype=torch.long)

    def __getitem__(self, index):
        """Return (flattened image tensor, label tensor) at *index*."""
        return self.x_train_tensor[index], self.y_train_tensor[index]

    def __len__(self):
        """Number of samples loaded from the CSV."""
        return self.x_train_tensor.shape[0]
# Build Dataset objects for both splits and a shuffled training loader.
training_data = ASL_Dataset("data/sign_mnist_train.csv")
validation_data = ASL_Dataset("data/sign_mnist_valid.csv")
# batch_size=1000 -> ~28 batches per epoch over the 27,455 training rows.
train_loader = DataLoader(dataset=training_data, batch_size=1000, shuffle=True)
Constructing a neural network
import torch.nn as nn
class Net(nn.Module):
    """Fully connected classifier: 784 input pixels -> 24 ASL letter classes."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 24)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Two ReLU hidden layers; the final layer emits raw logits
        # (unnormalized log-probabilities) for CrossEntropyLoss.
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
# Instantiate the untrained network.
net = Net()
Loss function and optimization method
# CrossEntropyLoss applies log-softmax internally, so the net outputs raw logits.
loss_function = nn.CrossEntropyLoss()
# Adam optimizer over all network parameters with learning rate 1e-3.
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
Training of a neural network
# Train for 30 epochs; report the accumulated batch loss each epoch.
for epoch in range(30):
    loss_val = 0
    # Iterate mini-batches directly; the original enumerate() index was unused.
    for inputs, labels in train_loader:
        optimizer.zero_grad()              # clear gradients from the previous step
        pred = net(inputs)                 # logits, shape (batch, 24)
        loss = loss_function(pred, labels)
        loss.backward()                    # backpropagate
        optimizer.step()                   # update parameters
        loss_val += loss.item()            # accumulate scalar loss for reporting
    print("Epoch:", epoch+1, " , Loss:", loss_val)
'''
Epoch: 1 , Loss: 0.668689101934433
Epoch: 2 , Loss: 0.6515311505645514
Epoch: 3 , Loss: 0.638214835897088
Epoch: 4 , Loss: 0.5892331469804049
Epoch: 5 , Loss: 0.5794639568775892
Epoch: 6 , Loss: 0.5550589840859175
Epoch: 7 , Loss: 0.5423025581985712
Epoch: 8 , Loss: 0.5214628633111715
Epoch: 9 , Loss: 0.5088292397558689
Epoch: 10 , Loss: 0.48328547552227974
Epoch: 11 , Loss: 0.47769514936953783
Epoch: 12 , Loss: 0.4556390782818198
Epoch: 13 , Loss: 0.42544492054730654
Epoch: 14 , Loss: 0.4218867691233754
Epoch: 15 , Loss: 0.40776201244443655
Epoch: 16 , Loss: 0.4167418945580721
Epoch: 17 , Loss: 0.3998836725950241
Epoch: 18 , Loss: 0.37387024983763695
Epoch: 19 , Loss: 0.3697629049420357
Epoch: 20 , Loss: 0.370188289321959
Epoch: 21 , Loss: 0.3402200732380152
Epoch: 22 , Loss: 0.3259954256936908
Epoch: 23 , Loss: 0.32995925284922123
Epoch: 24 , Loss: 0.3105016676709056
Epoch: 25 , Loss: 0.3089276300743222
Epoch: 26 , Loss: 0.2963345553725958
Epoch: 27 , Loss: 0.2889462383463979
Epoch: 28 , Loss: 0.2756386958062649
Epoch: 29 , Loss: 0.2762810839340091
Epoch: 30 , Loss: 0.28116170689463615
'''
Prediction and evaluation for a test set
# Evaluate on the validation split.
# no_grad() disables autograd bookkeeping — inference needs no gradients.
with torch.no_grad():
    input_test = validation_data.x_train_tensor
    pred_test = net(input_test)
    # argmax over the class dimension gives the predicted letter index.
    pred_category = torch.argmax(pred_test, dim=1)
    label_test = validation_data.y_train_tensor
    accuracy = torch.mean((pred_category == label_test).float())
# .item() prints a plain float instead of a tensor repr.
print("Test accuracy:", accuracy.item())
반응형