An example of building classification and regression neural networks in Python and accelerating them with a GPU

Time: 2021-05-08

Classification network

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# Construct the data: two 100-point Gaussian clusters
n_data = torch.ones(100, 2)
x0 = torch.normal(3*n_data, 1)
x1 = torch.normal(-3*n_data, 1)
# Labels: cluster x0 gets y0 = 0, cluster x1 gets y1 = 1
y0 = torch.zeros(100)
y1 = torch.ones(100)

# Concatenate the data with torch.cat
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)

# .cuda() moves the tensor data onto the GPU
x, y = Variable(x).cuda(), Variable(y).cuda()

# plt.scatter(x.data.cpu().numpy()[:, 0], x.data.cpu().numpy()[:, 1], c=y.data.cpu().numpy(), s=100, lw=0, cmap='RdYlBu')
# plt.show()

#Network construction method 1
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Two hidden layers
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        # Output layer
        self.out = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Apply ReLU after each hidden layer (without an activation in
        # between, two stacked linear layers collapse into a single linear map)
        x = F.relu(self.hidden1(x))
        x = F.relu(self.hidden2(x))
        x = self.out(x)
        return x

# Instantiate the network: 2 input features, 10 hidden units, 2 output classes
net = Net(2, 10, 2)

#Network construction method 2
'''
net = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2),
)
'''
# .cuda() moves the network parameters onto the GPU
net.cuda()
#Configure network optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
# SGD: torch.optim.SGD(net.parameters(), lr=0.01)
# Momentum: torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.8)
# RMSprop: torch.optim.RMSprop(net.parameters(), lr=0.01, alpha=0.9)
# Adam: torch.optim.Adam(net.parameters(), lr=0.01, betas=(0.9, 0.99))

loss_func = torch.nn.CrossEntropyLoss()

#Dynamic visualization
plt.ion()
plt.show()

for t in range(300):
    print(t)
    out = net(x)
    loss = loss_func(out, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if t % 5 == 0:
        plt.cla()
        # Predicted class = index of the largest softmax probability;
        # dim=1 is the class dimension (dim=0 would normalize across samples)
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        # Matplotlib cannot read GPU tensors, so .cpu() moves the data back to the CPU
        pred_y = prediction.data.cpu().numpy().squeeze()
        target_y = y.data.cpu().numpy()
        plt.scatter(x.data.cpu().numpy()[:, 0], x.data.cpu().numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlBu')
        accuracy = sum(pred_y == target_y) / 200  # 200 samples in total
        plt.text(1.5, -4, 'accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
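If you want the same classification script to run on machines with or without a CUDA device, a common pattern is to pick the device at runtime and move both the data and the network with .to(device) instead of hard-coding .cuda(). Below is a minimal sketch under that assumption; it reuses the Net class defined above, and the runtime device selection and argmax-based accuracy check are illustrative additions, not part of the original script:

import torch

# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Same two-cluster data as above, moved with .to(device)
n_data = torch.ones(100, 2)
x = torch.cat((torch.normal(3*n_data, 1), torch.normal(-3*n_data, 1)), 0).to(device)
y = torch.cat((torch.zeros(100), torch.ones(100)), 0).long().to(device)

# .to(device) works on both CPU and GPU, unlike .cuda()
net = Net(2, 10, 2).to(device)

# Forward pass of the freshly initialized network
# (train it as in the loop above before reading the accuracy)
out = net(x)
pred = out.argmax(dim=1)  # predicted class per sample
print('accuracy: %.2f' % (pred == y).float().mean().item())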

Regression network

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# Construct the data: y = x^2 plus noise
x = torch.unsqueeze(torch.linspace(-1,1,100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())

# .cuda() moves the tensor data onto the GPU
x, y = Variable(x).cuda(), Variable(y).cuda()

# plt.scatter(x.data.cpu().numpy(), y.data.cpu().numpy())
# plt.show()

#Network construction method 1
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Hidden layer
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        # Output layer
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x
 
# Instantiate the network: 1 input feature, 10 hidden units, 1 output
net = Net(1, 10, 1)

#Network construction method 2
'''
net = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
)
'''

# .cuda() moves the network parameters onto the GPU
net.cuda()
#Configure network optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
# SGD: torch.optim.SGD(net.parameters(), lr=0.01)
# Momentum: torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.8)
# RMSprop: torch.optim.RMSprop(net.parameters(), lr=0.01, alpha=0.9)
# Adam: torch.optim.Adam(net.parameters(), lr=0.01, betas=(0.9, 0.99))

loss_func = torch.nn.MSELoss()

#Dynamic visualization
plt.ion()
plt.show()

for t in range(300):
    prediction = net(x)
    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if t % 5 == 0:
        plt.cla()
        # Matplotlib cannot read GPU tensors, so .cpu() moves the data back to the CPU
        plt.scatter(x.data.cpu().numpy(), y.data.cpu().numpy())
        plt.plot(x.data.cpu().numpy(), prediction.data.cpu().numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
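A note on the Variable wrapper used in both listings: it has been deprecated since PyTorch 0.4, and plain tensors now track gradients themselves, so Variable(x).cuda() can simply become x.to(device). Below is a minimal sketch of the regression setup in that modern style; the runtime device fallback is an added assumption, and the loop mirrors the listing above without the plotting:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Same noisy parabola as above, without the Variable wrapper
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())
x, y = x.to(device), y.to(device)

net = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
).to(device)

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()

for t in range(300):
    loss = loss_func(net(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print('final loss: %.4f' % loss.item())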

The above example of building classification and regression neural networks in Python and accelerating them with a GPU is everything I wanted to share. I hope it can serve as a useful reference.

Hello, I’m younger brother. A few days ago, I shared the second interview question, the interview site of search engine in MySQL. This question is the interview at normal temperature. After reading it, I’m sure you will gain something in terms of database engine If you haven’t read my first share, you can refer to […]