squeeze: torch 数据降维 (reducing tensor dimensions with `squeeze`)

Published on Aug. 22, 2023, 12:10 p.m.

import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# Create a random tensor of shape (2, 3, 4): 2 matrices, each 3 rows x 4 cols.
# NOTE: no manual seed is set, so the values below differ on every run.
y = torch.randn(2,3,4) 
y
tensor([[[ 0.4971, -1.2132, -0.9139,  0.5424],
         [ 0.1473,  0.5857,  0.1074,  0.5314],
         [ 0.9898, -0.4156, -0.2386,  0.4517]],

        [[ 0.4503, -1.4042,  0.8534,  0.9551],
         [-0.2095, -1.2657, -0.3565, -1.2950],
         [ 1.3797, -0.2025, -0.4090, -0.0397]]])
# Softmax along dim 1 (the row axis of each 3x4 matrix): within each matrix,
# every column of 3 values sums to 1, as the printed output below shows.
z=F.softmax(y,1)
z
tensor([[[0.2993, 0.1080, 0.1742, 0.3446],
         [0.2109, 0.6524, 0.4836, 0.3408],
         [0.4898, 0.2397, 0.3422, 0.3147]],

        [[0.2469, 0.1827, 0.6324, 0.6779],
         [0.1276, 0.2098, 0.1886, 0.0714],
         [0.6254, 0.6075, 0.1790, 0.2507]]])
# Slice keeps the first row of each matrix WITHOUT dropping the axis:
# result shape is (2, 1, 4) — note the extra bracket level in the output.
z[:,:1]
tensor([[[0.2993, 0.1080, 0.1742, 0.3446]],

        [[0.2469, 0.1827, 0.6324, 0.6779]]])
# Same idea with the LAST row of each matrix; -1: keeps dim 1, shape (2, 1, 4).
z[:,-1:]

tensor([[[0.4898, 0.2397, 0.3422, 0.3147]],

        [[0.6254, 0.6075, 0.1790, 0.2507]]])
# Drop the size-1 dimension: squeeze() turns shape (2, 1, 4) into (2, 4).
z[:,-1:].squeeze()
tensor([[0.4898, 0.2397, 0.3422, 0.3147],
        [0.6254, 0.6075, 0.1790, 0.2507]])
# Element-wise scaling of the squeezed (2, 4) tensor — every entry doubled.
z[:,-1:].squeeze()*2
tensor([[0.9796, 0.4794, 0.6844, 0.6293],
        [1.2509, 1.2150, 0.3579, 0.5014]])
# num_labels=10

# loss = None
# if num_labels == 1:
#     #  We are doing regression
#     loss_fct = torch.nn.MSELoss()
#     loss = loss_fct(logits.view(-1), batch[1].view(-1).to("cuda"))
# else:
#     loss_fct = torch.nn.CrossEntropyLoss()
#     loss = loss_fct(logits.view(-1, num_labels), batch[1].to("cuda").view(-1))  
# NOTE(review): this applies softmax to values that are ALREADY softmax
# probabilities (z came from F.softmax at the top) — presumably intentional
# for the demo, but in real code the input here would be raw logits.
out=F.softmax(z[:,-1:].squeeze(),1)
# torch.max over dim 1 returns (values, indices); [1] takes the argmax indices,
# i.e. the predicted class per row.
torch.max(out,1)[1]
tensor([0, 0])
# Cross-entropy loss example.
# The original lines referenced names never defined in this post (`logits`,
# `num_labels`, `batch`) — a NameError at runtime — and forced a GPU with
# .to("cuda"). Use concrete CPU tensors so the snippet actually runs;
# the `loss_fct` / `loss` names are kept from the original.
num_labels = 4
logits = torch.tensor([[2.0, 1.0, 0.5, 0.1],
                       [0.3, 2.5, 0.2, 0.4]])  # raw scores, shape (2, 4)
labels = torch.tensor([0, 1])  # one target class index per row of logits
loss_fct = torch.nn.CrossEntropyLoss()
# CrossEntropyLoss expects raw logits (it applies log-softmax internally).
loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))

Tags: