-
Notifications
You must be signed in to change notification settings - Fork 0
/
exp_torch.py
50 lines (37 loc) · 960 Bytes
/
exp_torch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
# import torch
# x = torch.randint(1, 10, (2, 2, 3))
# x = x.view(2, -1)
# print(x)
# print(x.size())
# x1 = torch.argmax(x, 1)
# x1 = x1.view(-1, 1)
# print(x1)
# x2 = x1.repeat(1, 10).view(-1, 1)
# print(x2)
# print(x2.size())
# how to use torch.gather
# x = torch.arange(24).reshape(4, 6).float()
# print(x)
# indices = torch.tensor([1, 4, 2, 3]).unsqueeze(-1)
# indices = torch.tensor([[1, 2, 2, 3, 1, 0]])
# print(indices)
# t = torch.gather(x, 1, indices)
# print(t)
# print(t.squeeze(1))
# print(t.t())
# print(t.t().shape)
# print(torch.max(x, 1))
# print(torch.argmax(x, 1))
# print(21 % 8, 21 // 8)
# xx = torch.randn_like(x).fill_(0.2)
# xx += x
# print(xx)
# print(xx.dtype, xx.device)
# xx /= 1.0*256/32
# # xx //= 5
# print(xx)
# print(torch.mean(xx, 0))
# Demo script: f-string float formatting and querying the CPU core count.
# PEP 8: imports belong at the top, before any executable statements.
from multiprocessing import cpu_count

num = 2.3214
# :.2f rounds to two decimal places, so this prints "num is 2.32".
print(f'num is {num:.2f}')
# Prints the number of logical CPU cores ("CPU的核数为" = "the CPU core count is").
print("CPU的核数为:{}".format(cpu_count()))