当前位置:   article > 正文

(DL笔记)Dive into Deep Learning —— 数据操作
  1. import torch
  2. from numpy import exp, math
  3. '''
  4. data operation
  5. '''
  6. #matrix-op
  7. matrix=torch.tensor([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
  8. print('original matrix')
  9. print(matrix)
  10. print('--------------------')
  11. print('matrix[1,2]')
  12. print(matrix[1,2])
  13. print('--------------------')
  14. print('matrix[1,:]')
  15. print(matrix[1,:])
  16. print('--------------------')
  17. print('matrix[:,2]')
  18. print(matrix[:,2])
  19. print('--------------------')
  20. print('matrix[1:3,2]')
  21. print(matrix[1:3,2])
  22. print('--------------------')
  23. print('matrix[::2,::2]')
  24. print(matrix[::2,::2])
  25. print('--------------------')
  26. print('matrix[-1]')
  27. print(matrix[-1])
  28. print('--------------------')
  29. '''
  30. OUTPUT
  31. --------------------
  32. original matrix
  33. tensor([[ 1, 2, 3, 4],
  34. [ 5, 6, 7, 8],
  35. [ 9, 10, 11, 12],
  36. [13, 14, 15, 16]])
  37. --------------------
  38. matrix[1,2]
  39. tensor(7)
  40. --------------------
  41. matrix[1,:]
  42. tensor([5, 6, 7, 8])
  43. --------------------
  44. matrix[:,2]
  45. tensor([ 3, 7, 11, 15])
  46. --------------------
  47. matrix[1:3,2]
  48. tensor([ 7, 11])
  49. --------------------
  50. matrix[::2,::2]
  51. tensor([[ 1, 3],
  52. [ 9, 11]])
  53. --------------------
  54. matrix[-1]
  55. tensor([13, 14, 15, 16])
  56. --------------------
  57. '''
  58. x=torch.arange(10)
  59. # x -- int in [0,10)
  60. print(x)
  61. print('shape -----' + str(x.shape))
  62. print('#elements -----' + str(x.numel))
  63. x=x.reshape(2,5)
  64. print(x)
  65. '''
  66. OUTPUT
  67. --------------------
  68. tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
  69. shape -----torch.Size([10])
  70. #elements -----<built-in method numel of Tensor object at 0x7fdbe91210d0>
  71. tensor([[0, 1, 2, 3, 4],
  72. [5, 6, 7, 8, 9]])
  73. '''
  74. i=torch.tensor([1,2,3,4])
  75. j=torch.tensor([4.0,5,6,7]) #float
  76. print('j')
  77. print(j)
  78. print('i+j')
  79. print(i+j)
  80. print('i*j')
  81. print(i*j)
  82. print('exp(j)')
  83. print(exp(j))
  84. '''
  85. OUTPUT
  86. --------------------
  87. j
  88. tensor([4., 5., 6., 7.])
  89. i+j
  90. tensor([ 5., 7., 9., 11.])
  91. i*j
  92. tensor([ 4., 10., 18., 28.])
  93. exp(j)
  94. tensor([ 54.5981, 148.4132, 403.4288, 1096.6332])
  95. '''
  96. #concatenation
  97. X = torch.arange(12, dtype=torch.float32).reshape((3,4))
  98. Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) #float
  99. print('------------ \n'+'x' )
  100. print(X)
  101. print(X.shape )
  102. print('------------ \n'+'y' )
  103. print(Y)
  104. print(Y.shape )
  105. print('------------ \n'+'cat(x,y) vertically' )
  106. print(torch.cat((X, Y), dim=0))
  107. print(torch.cat((X, Y), dim=0).shape )
  108. print('------------ \n'+'cat(x,y) horizontally' )
  109. print(torch.cat((X, Y), dim=1))
  110. print(torch.cat((X, Y), dim=1).shape )
  111. '''
  112. OUTPUT
  113. --------------------
  114. x
  115. tensor([[ 0., 1., 2., 3.],
  116. [ 4., 5., 6., 7.],
  117. [ 8., 9., 10., 11.]])
  118. torch.Size([3, 4])
  119. ------------
  120. y
  121. tensor([[2., 1., 4., 3.],
  122. [1., 2., 3., 4.],
  123. [4., 3., 2., 1.]])
  124. torch.Size([3, 4])
  125. ------------
  126. cat(x,y) vertically
  127. tensor([[ 0., 1., 2., 3.],
  128. [ 4., 5., 6., 7.],
  129. [ 8., 9., 10., 11.],
  130. [ 2., 1., 4., 3.],
  131. [ 1., 2., 3., 4.],
  132. [ 4., 3., 2., 1.]])
  133. torch.Size([6, 4])
  134. ------------
  135. cat(x,y) horizontally
  136. tensor([[ 0., 1., 2., 3., 2., 1., 4., 3.],
  137. [ 4., 5., 6., 7., 1., 2., 3., 4.],
  138. [ 8., 9., 10., 11., 4., 3., 2., 1.]])
  139. torch.Size([3, 8])
  140. '''
  141. #memory management -- 尽量不要重新分配内存,+= 或者放入新的矩阵
  142. X = torch.arange(12, dtype=torch.int).reshape((3,4))
  143. Y = torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
  144. print(id(Y))
  145. Y=Y+X
  146. print(id(Y))
  147. Y+=X
  148. print(id(Y))
  149. print('--------------------')
  150. Z = torch.zeros_like(Y)
  151. print(id(Z))
  152. Z[:]=Y+X
  153. print(id(Z))
  154. '''
  155. OUTPUT
  156. --------------------
  157. 139964398391984
  158. 139964398392944
  159. 139964398392944
  160. --------------------
  161. 139964398391984
  162. 139964398391984
  163. '''
  164. #广播机制 -- 不同shape的矩阵先复制到等同的shape,后做加减运算
  165. a = torch.arange(3).reshape((3, 1))
  166. b = torch.arange(2).reshape((1, 2))
  167. print(a), print(b),print(a+b)
  168. '''
  169. OUTPUT
  170. --------------
  171. tensor([[0],
  172. [1],
  173. [2]])
  174. tensor([[0, 1]])
  175. tensor([[0, 1],
  176. [1, 2],
  177. [2, 3]])
  178. '''

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/知新_RL/article/detail/762175
推荐阅读
相关标签
  

闽ICP备14008679号