In Python, given a two-dimensional tensor, we can use tensor[:,:2] to slice out the 2x2 block formed by the first two columns, i.e. the top-left corner of the matrix, for example:
x = torch.tensor([[-1.4673,  0.9980, -2.1427, -1.1798, -0.0646, -0.2635, -2.8930, -0.2563,
                    0.4559, -0.7947, -0.4540,  3.3224,  0.2295,  5.5568, -8.0451, -2.4529,
                    4.8724,  2.1640,  3.3255,  0.6693, -1.2362,  4.4713, -3.5547, -0.0528,
                    0.1031, -1.2472, -1.6014,  1.8134],
                  [ 2.1636, -1.1497, -5.0298,  2.8261, -0.5684,  0.6389,  2.9009, -5.1609,
                    1.7358, -3.1819, -0.9877,  5.5598,  6.7142,  4.5704, -1.2683, -5.3046,
                    3.0454,  3.2757, -3.2541,  3.6619, -3.6391, -0.2002,  5.7175,  5.7130,
                    0.6632, -0.0744, -0.3502,  4.8116]])
y, z = x[:,:2].chunk(2,1)
print(y)
print(z)
[Output]:
tensor([[-1.4673],
        [ 2.1636]])
tensor([[ 0.9980],
        [-1.1497]])
What is the correct way to do this in C++, specifically with PyTorch's ATen?
For example, in the LSTM at https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L253
there is a gates.chunk(4, 1) call. What should I do if I want to run gates[:,:2].chunk(2,1)
to extract different parts of the gates, e.g. auto partial_gates = gates[:,:2].chunk(4, 1);?
template <typename cell_params>
struct LSTMCell : Cell<std::tuple<Tensor, Tensor>, cell_params> {
  using hidden_type = std::tuple<Tensor, Tensor>;
  hidden_type operator()(const Tensor& input, const hidden_type& hidden, const cell_params& params) const override {
    auto hx = std::get<0>(hidden);
    auto cx = std::get<1>(hidden);
    if (input.is_cuda()) {
      auto igates = params.matmul_ih(input);
      auto hgates = params.matmul_hh(hx);
      auto result = at::_thnn_fused_lstm_cell(igates, hgates, cx, params.b_ih, params.b_hh);
      // Slice off the workspace argument (it's needed only for AD).
      return std::make_tuple(std::get<0>(result), std::get<1>(result));
    }
    auto gates = params.linear_ih(input) + params.linear_hh(hx);
    auto chunked_gates = gates.chunk(4, 1);
    auto partial_gates = gates[:,:2].chunk(4, 1);  // <-- what I would like to do (Python-style slicing, not valid C++)
    auto ingate = chunked_gates[0].sigmoid();
    auto forgetgate = chunked_gates[1].sigmoid();
    auto cellgate = chunked_gates[2].tanh();
    auto outgate = chunked_gates[3].sigmoid();
    auto cy = (forgetgate * cx) + (ingate * cellgate);
    auto hy = outgate * cy.tanh();
    return std::make_tuple(hy, cy);
  }
};
Answer 0 (score: 5)
Use .slice:

Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step)

auto partial_gates = gates.slice(1, 0, 2).chunk(4, 1);

(The end index of slice is exclusive, so slice(1, 0, 2) keeps columns 0 and 1, matching the Python gates[:, :2].)

Newer versions of PyTorch also support Python-style tensor indexing in C++ through Tensor::index
and Tensor::index_put_:

using namespace torch::indexing;
auto partial_gates = gates.index({"...", Slice(None, 2)}).chunk(4, 1);

This also supports multidimensional indexing. The general translation between Python indexing and Tensor::index
/ Tensor::index_put_ is:
Python C++ (assuming `using namespace torch::indexing`)
-------------------------------------------------------------------
0 0
None None
... "..." or Ellipsis
: Slice()
start:stop:step Slice(start, stop, step)
True / False true / false
[[1, 2]] torch::tensor({{1, 2}})
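
To make the table above concrete, here is a minimal, illustrative sketch (a standalone libtorch program written for this answer; the tensor values, shapes, and variable names are made up) that exercises a few of the rows with Tensor::index and Tensor::index_put_:

#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::indexing;

  // A small 2x4 example tensor (arbitrary values).
  auto t = torch::arange(8).reshape({2, 4});

  // Python: t[:, :2]    ->  C++: t.index({Slice(), Slice(None, 2)})
  auto first_two_cols = t.index({Slice(), Slice(None, 2)});

  // Python: t[..., 0]   ->  C++: t.index({"...", 0})
  auto first_col = t.index({"...", 0});

  // Python: t[1, 1:3]   ->  C++: t.index({1, Slice(1, 3)})
  auto middle = t.index({1, Slice(1, 3)});

  // Python: t[:, 0] = 0 ->  C++: t.index_put_({Slice(), 0}, 0)
  t.index_put_({Slice(), 0}, 0);

  std::cout << first_two_cols << "\n" << first_col << "\n"
            << middle << "\n" << t << std::endl;
  return 0;
}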
Answer 1 (score: 0)
It's .narrow() from https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/TensorShape.cpp#L364:
auto partial_gates = gates.narrow(1, 0, 2).chunk(4, 1);
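
For comparison, all of the answers above pick out the same first two columns (Python gates[:, :2]). The snippet below is only an illustrative sketch that assumes gates is the 2D gates tensor from the question:

// Equivalent ways to take the first two columns of `gates`:
auto a = gates.slice(1, 0, 2);   // slice(dim, start, end): end is exclusive
auto b = gates.narrow(1, 0, 2);  // narrow(dim, start, length): length, not an end index
auto c = gates.index({torch::indexing::Slice(), torch::indexing::Slice(torch::indexing::None, 2)});

Note that narrow takes a length while slice takes an exclusive end index, so both calls happen to use (1, 0, 2) here even though the last argument means something different in each.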