import torch
import numpy
import torch.nn.functional as F
from torch.autograd import Variable
# Minimal demo: gradient of a single-layer sigmoid model w.r.t. its weights.
x = torch.randn(1, 10)                       # input: batch of 1, 10 features
w = torch.randn(2, 10, requires_grad=True)   # weights: 2 outputs x 10 inputs
o = torch.sigmoid(x @ w.t())                 # forward pass -> shape (1, 2)
print(o.shape)

# Mean-squared-error loss against an all-ones target.
# F.mse_loss(input, target): the prediction goes first (MSE is symmetric in
# value, but this matches the documented signature).
loss = F.mse_loss(o, torch.ones(1, 2))
print(loss.shape)                            # scalar loss -> torch.Size([])

loss.backward()                              # backprop fills w.grad
print(w.grad)                                # same shape as w: (2, 10)

# Example output (values vary per run — weights are randomly initialized):
# torch.Size([1, 2])
# torch.Size([])
# tensor([[-0.0059, -0.0482, -0.0712,  0.0172,  0.0735,  0.0953, -0.0761, -0.1083,
#           0.0385,  0.0079],
#         [-0.0122, -0.1006, -0.1487,  0.0360,  0.1533,  0.1989, -0.1588, -0.2259,
#           0.0802,  0.0165]])