PyTorch 中 requires_grad 与 detach() 的行为示例
情况1
# Case 1: how requires_grad propagates through tensor operations.
# A tensor created without requires_grad defaults to False; any op whose
# inputs include a requires_grad=True tensor produces a grad-tracking result.
import torch

default_requires_grad = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float)
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = A+B      # A requires grad, so C does too
Loss=C**2    # built from C, so Loss also tracks gradients
#A = A.detach()
print('default_requires_grad: ',default_requires_grad.requires_grad)
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
#print('A after detach: ', A.requires_grad)
输出1:
default_requires_grad: False
A: True
B: False
C: True
Loss: True
情况2
# Case 2: calling A.detach() WITHOUT keeping the result.
# detach() returns a new tensor; it does not modify A in place, so when the
# return value is discarded, A still reports requires_grad=True.
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = A+B
Loss=C**2
A.detach()  # return value discarded — has no effect on A
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
输出2
A: True
B: False
C: True
Loss: True
情况3
# Case 3: rebinding A to A.detach() AFTER the graph is built.
# The new A no longer requires grad, but C and Loss were computed from the
# original (grad-tracking) A node, so they still report requires_grad=True.
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = A+B
Loss=C**2
A = A.detach()  # rebind: only the name A now points at a detached tensor
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
输出3
A: False
B: False
C: True
Loss: True
情况4
# Case 4: detaching C after Loss was already built.
# C is rebound to a detached tensor (requires_grad=False), but Loss was
# computed from the ORIGINAL C node, so Loss still tracks gradients.
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = A+B
Loss=C**2
C = C.detach()  # detach happens after Loss was derived from C
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
输出4
A: True
B: False
C: False
Loss: True
情况5
# Case 5: detaching Loss itself.
# Only the name Loss is rebound to a detached tensor; the upstream tensors
# (A, C) are untouched and keep their requires_grad state.
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = A+B
Loss=C**2
Loss = Loss.detach()  # Loss no longer tracks grad; A and C unaffected
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
输出5
A: True
B: False
C: True
Loss: False
情况6
# Case 6: detaching A BEFORE building the graph.
# C = A+B then has no grad-requiring inputs, so C (and everything derived
# from it) reports requires_grad=False.
A = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float, requires_grad = True)
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
A = A.detach()  # detach first: downstream ops see a non-tracking A
C = A+B
Loss=C**2
Loss = Loss.detach()
print('A: ', A.requires_grad)
print('B: ',B.requires_grad)
print('C: ',C.requires_grad)
print('Loss: ',Loss.requires_grad)
输出6
A: False
B: False
C: False
Loss: False
情况7
# Case 7: an op whose inputs all have requires_grad=False produces a
# result with requires_grad=False.
B = torch.tensor([1.,2.,0.,0,-1,3],dtype=torch.float,requires_grad = False)
C = B*B
print(C.requires_grad)
输出7
False
更多推荐
PyTorch 中 requires_grad 与 detach() 的行为示例
发布评论