
Usage of the numel function in PyTorch

Reposted. Author: qq735679552. Updated: 2022-09-27 22:32:09


Getting the total number of elements in a tensor

import torch

x = torch.randn(3, 3)
print("number of elements of x is", x.numel())
y = torch.randn(3, 10, 5)
print("number of elements of y is", y.numel())

Output:

number of elements of x is 9
number of elements of y is 150

9 and 150 are the element counts of x and y, respectively (3 x 3 = 9 and 3 x 10 x 5 = 150).
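In other words, numel() is simply the product of all the sizes in the tensor's shape. A minimal sketch confirming this (the math.prod comparison is my own addition, not from the original post):

import math
import torch

y = torch.randn(3, 10, 5)
# numel() equals the product of the entries of y.shape: 3 * 10 * 5 = 150
assert y.numel() == math.prod(y.shape)
print(y.numel())  # 150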

Supplement: using numel() to get the number of elements in a PyTorch tensor

numel is short for "number of elements".

numel() returns the element count directly as a Python int.

import torch

a = torch.randn(1, 2, 3, 4)
b = a.numel()
print(type(b))  # <class 'int'>
print(b)        # 24

With numel(), we can quickly check how many elements a tensor contains.
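One common practical use (my own example, not from the original post) is counting a model's trainable parameters, since each parameter tensor reports its size via numel():

import torch
from torch import nn

# a hypothetical toy model, just for illustration
model = nn.Linear(10, 5)  # weight: 5 x 10 = 50 elements, bias: 5 elements
total = sum(p.numel() for p in model.parameters())
print(total)  # 55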

Supplement: PyTorch convolution structure and the numel() function

Take a look at the code:

from torch import nn

class CNN(nn.Module):
    def __init__(self, num_channels=1, d=56, s=12, m=4):
        super(CNN, self).__init__()
        # Note: the second and third convolutions also use num_channels as
        # in_channels, so a forward pass through this Sequential would fail;
        # the example below only inspects the weights and never calls forward().
        self.first_part = nn.Sequential(
            nn.Conv2d(num_channels, d, kernel_size=3, padding=5 // 2),
            nn.Conv2d(num_channels, d, kernel_size=(1, 3), padding=5 // 2),
            nn.Conv2d(num_channels, d, kernel_size=(3, 1), padding=5 // 2),
            nn.PReLU(d)
        )

    def forward(self, x):
        x = self.first_part(x)
        return x

model = CNN()
for m in model.first_part:
    if isinstance(m, nn.Conv2d):
        # print('m:', m.weight.data)
        print('m:', m.weight.data[0])
        print('m:', m.weight.data[0][0])
        print('m:', m.weight.data.numel())  # numel() counts the elements in the weight tensor
 
Result:
m: tensor([[[-0.2822,  0.0128, -0.0244],
          [-0.2329,  0.1037,  0.2262],
          [ 0.2845, -0.3094,  0.1443]]])  # 3x3 kernel
m: tensor([[-0.2822,  0.0128, -0.0244],
         [-0.2329,  0.1037,  0.2262],
         [ 0.2845, -0.3094,  0.1443]])  # 3x3 kernel
m: 504  # = 56 x (3 x 3): 56 output channels, 3x3 kernel
m: tensor([-0.0335,  0.2945,  0.2512,  0.2770,  0.2071,  0.1133, -0.1883,  0.2738,
          0.0805,  0.1339, -0.3000, -0.1911, -0.1760,  0.2855, -0.0234, -0.0843,
          0.1815,  0.2357,  0.2758,  0.2689, -0.2477, -0.2528, -0.1447, -0.0903,
          0.1870,  0.0945, -0.2786, -0.0419,  0.1577, -0.3100, -0.1335, -0.3162,
         -0.1570,  0.3080,  0.0951,  0.1953,  0.1814, -0.1936,  0.1466, -0.2911,
         -0.1286,  0.3024,  0.1143, -0.0726, -0.2694, -0.3230,  0.2031, -0.2963,
          0.2965,  0.2525, -0.2674,  0.0564, -0.3277,  0.2185, -0.0476,  0.0558])  # bias values
m: tensor([[[ 0.5747, -0.3421,  0.2847]]])  # 1x3 kernel
m: tensor([[ 0.5747, -0.3421,  0.2847]])  # 1x3 kernel
m: 168  # = 56 x (1 x 3): 56 output channels, 1x3 kernel
m: tensor([ 0.5328, -0.5711, -0.1945,  0.2844,  0.2012, -0.0084,  0.4834, -0.2020,
         -0.0941,  0.4683, -0.2386,  0.2781, -0.1812, -0.2990, -0.4652,  0.1228,
         -0.0627,  0.3112, -0.2700,  0.0825,  0.4345, -0.0373, -0.3220, -0.5038,
         -0.3166, -0.3823,  0.3947, -0.3232,  0.1028,  0.2378,  0.4589,  0.1675,
         -0.3112, -0.0905, -0.0705,  0.2763,  0.5433,  0.2768, -0.3804,  0.4855,
         -0.4880, -0.4555,  0.4143,  0.5474,  0.3305, -0.0381,  0.2483,  0.5133,
         -0.3978,  0.0407,  0.2351,  0.1910, -0.5385,  0.1340,  0.1811, -0.3008])  # bias values
m: tensor([[[0.0184],
          [0.0981],
          [0.1894]]])  # 3x1 kernel
m: tensor([[0.0184],
         [0.0981],
         [0.1894]])  # 3x1 kernel
m: 168  # = 56 x (3 x 1): 56 output channels, 3x1 kernel
m: tensor([-0.2951, -0.4475,  0.1301,  0.4747, -0.0512,  0.2190,  0.3533, -0.1158,
          0.2237, -0.1407, -0.4756,  0.1637, -0.4555, -0.2157,  0.0577, -0.3366,
         -0.3252,  0.2807,  0.1660,  0.2949, -0.2886, -0.5216,  0.1665,  0.2193,
          0.2038, -0.1357,  0.2626,  0.2036,  0.3255,  0.2756,  0.1283, -0.4909,
          0.5737, -0.4322, -0.4930, -0.0846,  0.2158,  0.5565,  0.3751, -0.3775,
         -0.5096, -0.4520,  0.2246, -0.5367,  0.5531,  0.3372, -0.5593, -0.2780,
         -0.5453, -0.2863,  0.5712, -0.2882,  0.4788,  0.3222, -0.4846,  0.2170])  # bias values
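Each count printed above is just out_channels x in_channels x kernel_height x kernel_width. A small sketch (my addition, reusing the model from the listing above) that checks this against each layer's weight shape:

# assumes the CNN model constructed above is in scope
for m in model.first_part:
    if isinstance(m, nn.Conv2d):
        out_c, in_c, kh, kw = m.weight.shape
        # e.g. the first layer: 56 * 1 * 3 * 3 = 504
        assert m.weight.numel() == out_c * in_c * kh * kw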
  
'''After weight initialization'''
import math
from torch import nn

class CNN(nn.Module):
    def __init__(self, num_channels=1, d=56, s=12, m=4):
        super(CNN, self).__init__()
        self.first_part = nn.Sequential(
            nn.Conv2d(num_channels, d, kernel_size=3, padding=5 // 2),
            nn.Conv2d(num_channels, d, kernel_size=(1, 3), padding=5 // 2),
            nn.Conv2d(num_channels, d, kernel_size=(3, 1), padding=5 // 2),
            nn.PReLU(d)
        )
        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.first_part:
            if isinstance(m, nn.Conv2d):
                # He-style init: std = sqrt(2 / (out_channels * kernel elements))
                nn.init.normal_(m.weight.data, mean=0.0,
                                std=math.sqrt(2 / (m.out_channels * m.weight.data[0][0].numel())))
                nn.init.zeros_(m.bias.data)

    def forward(self, x):
        x = self.first_part(x)
        return x

model = CNN()
for m in model.first_part:
    if isinstance(m, nn.Conv2d):
        # print('m:', m.weight.data)
        print('m:', m.weight.data[0])
        print('m:', m.weight.data[0][0])
        print('m:', m.weight.data.numel())  # numel() counts the elements in the weight tensor
 
Result:
m: tensor([[[-0.0284, -0.0585,  0.0271],
          [ 0.0125,  0.0554,  0.0511],
          [-0.0106,  0.0574, -0.0053]]])
m: tensor([[-0.0284, -0.0585,  0.0271],
         [ 0.0125,  0.0554,  0.0511],
         [-0.0106,  0.0574, -0.0053]])
m: 504
m: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0.])
m: tensor([[[ 0.0059,  0.0465, -0.0725]]])
m: tensor([[ 0.0059,  0.0465, -0.0725]])
m: 168
m: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0.])
m: tensor([[[ 0.0599],
          [-0.1330],
          [ 0.2456]]])
m: tensor([[ 0.0599],
         [-0.1330],
         [ 0.2456]])
m: 168
m: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0.])
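Side note: the manual normal_ call above implements He-style initialization with std = sqrt(2 / (out_channels * kernel elements)), i.e. a fan-out variance rule. If I read the built-in API correctly, nn.init.kaiming_normal_ with mode='fan_out' computes the same std from the weight shape, so a sketch of the built-in alternative (my substitution, not the original author's code) would be:

from torch import nn

conv = nn.Conv2d(1, 56, kernel_size=3, padding=1)
# fan_out = out_channels * kernel_height * kernel_width, so std = sqrt(2 / fan_out)
nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
nn.init.zeros_(conv.bias)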

The above is my personal experience; I hope it serves as a useful reference. If there are any errors or things I have not fully considered, corrections are welcome.

Original article: https://blog.csdn.net/schmiloo/article/details/107020922
