import torch
from torch.autograd import Variable
import numpy as np


def get_data():
    # Toy regression dataset: 17 scalar inputs and 17 scalar targets.
    train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                          2.167, 7.042, 10.791, 5.313, 7.997, 5.564, 9.27, 3.1])
    train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 3.596, 2.53,
                          1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
    dtype = torch.FloatTensor
    # Inputs are reshaped to (17, 1) so they can be matrix-multiplied with the weight.
    X = Variable(torch.from_numpy(train_X).type(dtype), requires_grad=False).view(17, 1)
    y = Variable(torch.from_numpy(train_Y).type(dtype), requires_grad=False)
    return X, y


def get_weights():
    # Randomly initialized weight and bias; requires_grad=True so autograd tracks them.
    w = Variable(torch.randn(1), requires_grad=True)
    b = Variable(torch.randn(1), requires_grad=True)
    return w, b


def network(x):
    # Linear model: y_pred = x * w + b
    y_pred = torch.matmul(x, w) + b
    return y_pred


def loss_fn(y, y_pred):
    # Sum-of-squared-errors loss; clear stale gradients before backpropagating.
    loss = (y_pred - y).pow(2).sum()
    for param in [w, b]:
        if param.grad is not None:
            param.grad.data.zero_()
    loss.backward()
    return loss.data


def optimize(learning_rate):
    # Vanilla gradient-descent step on the raw .data tensors.
    w.data -= learning_rate * w.grad.data
    b.data -= learning_rate * b.grad.data


learning_rate = 1e-4
x, y = get_data()
w, b = get_weights()
for i in range(500):
    y_pred = network(x)
    loss = loss_fn(y, y_pred)
    if i % 50 == 0:
        print(loss)
    optimize(learning_rate)
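The listing above uses the pre-0.4 Variable wrapper, which has since been folded into plain tensors. As a rough sketch, not part of the original listing, the same model can be written against the current tensor API: parameters are created with requires_grad=True, the update is done inside torch.no_grad(), and gradients are reset with .grad.zero_().

import torch
import numpy as np

train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                      2.167, 7.042, 10.791, 5.313, 7.997, 5.564, 9.27, 3.1])
train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 3.596, 2.53,
                      1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

x = torch.from_numpy(train_X).float().view(17, 1)   # inputs, shape (17, 1)
y = torch.from_numpy(train_Y).float()                # targets, shape (17,)

w = torch.randn(1, requires_grad=True)               # weight, tracked by autograd
b = torch.randn(1, requires_grad=True)               # bias, tracked by autograd
learning_rate = 1e-4

for i in range(500):
    y_pred = torch.matmul(x, w) + b                  # forward pass of the linear model
    loss = (y_pred - y).pow(2).sum()                 # sum-of-squared-errors loss
    if i % 50 == 0:
        print(loss.item())
    loss.backward()                                  # compute dloss/dw and dloss/db
    with torch.no_grad():                            # update parameters outside the graph
        w -= learning_rate * w.grad
        b -= learning_rate * b.grad
        w.grad.zero_()                               # reset gradients for the next iteration
        b.grad.zero_()

The behavior is the same as the original code; only the API surface changes, and the explicit zeroing of gradients replaces the loop over [w, b] in loss_fn.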