Change LoDTensor to DenseTensor #10611

Open · wants to merge 1 commit into base: develop
14 changes: 7 additions & 7 deletions lite/backends/arm/math/sequence2batch.h
@@ -66,7 +66,7 @@ class CopyMatrixRowsFunctor {
};

template <typename T>
class LoDTensor2BatchFunctor {
class DenseTensor2BatchFunctor {
// Calculate the length of each sequence and
// sort sequence index by the length.
// example: sequences = {s0, s1, s2}
@@ -89,7 +89,7 @@ class LoDTensor2BatchFunctor {
if (!is_cal_batch_lod) {
auto lods = batch->lod();
CHECK_GT(lods.size(), 2UL)
<< "The LoD of LoDTensor should inlcude at least 2-level "
<< "The LoD of DenseTensor should inlcude at least 2-level "
"sequence information.";
CHECK_EQ(lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0]))
<< "The LoD information should be consistent with the dims.";
@@ -141,12 +141,12 @@ class LoDTensor2BatchFunctor {
batch_lods.emplace_back(std::vector<uint64_t>{0});
batch_lods.emplace_back(std::vector<uint64_t>{0});

// batch_lods[0] is the start positions for batch LoDTensor
// batch_lods[0] is the start positions for batch DenseTensor
int max_seqlen = seq_info[0].length;
batch_lods[0].resize(static_cast<size_t>(max_seqlen + 1));
// batch_lods[1] is the raw index in the input LoDTensor
// batch_lods[1] is the raw index in the input DenseTensor
batch_lods[1].resize(static_cast<size_t>(lod_tensor.dims()[0]));
// batch_lods[2] is the sort order for the input LoDTensor.
// batch_lods[2] is the sort order for the input DenseTensor.
batch_lods[2].resize(seq_info.size());

auto batch_starts = batch_lods[0].data();
@@ -179,12 +179,12 @@ class LoDTensor2BatchFunctor {
};

template <typename T>
class Batch2LoDTensorFunctor {
class Batch2DenseTensorFunctor {
public:
void operator()(const Tensor& batch, Tensor* lod_tensor) const {
auto in_lod = batch.lod();
CHECK_GT(in_lod.size(), 2UL)
<< "The LoD of LoDTensor should inlcude at least 2-level "
<< "The LoD of DenseTensor should inlcude at least 2-level "
"sequence information.";
CHECK_EQ(in_lod[1].size(), static_cast<size_t>(lod_tensor->dims()[0]))
<< "The LoD information should be consistent with the dims.";
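The batch layout these functors build can be summarized with a minimal sketch using plain STL types (illustrative names, not the lite::Tensor API); it assumes at least one non-empty sequence.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct SeqInfo {
  uint64_t start;   // row offset of the sequence in the input tensor
  uint64_t length;  // number of time steps in the sequence
  size_t seq_idx;   // original index of the sequence
};

// Sort sequences longest-first, then compute batch_starts, where
// batch_starts[t] is the row at which time step t begins in the batched
// tensor; step t holds one row from every sequence longer than t.
std::vector<uint64_t> ComputeBatchStarts(std::vector<SeqInfo>* seq_info) {
  std::sort(seq_info->begin(), seq_info->end(),
            [](const SeqInfo& a, const SeqInfo& b) {
              return a.length > b.length;
            });
  const uint64_t max_seqlen = seq_info->front().length;
  std::vector<uint64_t> batch_starts(max_seqlen + 1, 0);
  for (uint64_t t = 0; t < max_seqlen; ++t) {
    uint64_t rows_in_step = 0;
    for (const auto& info : *seq_info) {
      if (info.length > t) ++rows_in_step;
    }
    batch_starts[t + 1] = batch_starts[t] + rows_in_step;
  }
  return batch_starts;
}

For lengths {4, 3, 2} this yields batch_starts = {0, 3, 6, 8, 9}: each time step contributes one row per still-active sequence.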
20 changes: 10 additions & 10 deletions lite/backends/host/math/sequence_padding.cc
@@ -82,7 +82,7 @@ static void fast_mem_init(void* dest,
}

template <typename T>
class PaddingLoDTensorFunctor<lite::TargetType::kHost, T> {
class PaddingDenseTensorFunctor<lite::TargetType::kHost, T> {
public:
void operator()(const lite::Context<lite::TargetType::kHost>& context,
const lite::Tensor& seq_tensor,
@@ -135,7 +135,7 @@ class PaddingLoDTensorFunctor<lite::TargetType::kHost, T> {
};

template <typename T>
class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, T> {
class UnpaddingDenseTensorFunctor<lite::TargetType::kHost, T> {
public:
void operator()(const lite::Context<lite::TargetType::kHost>& context,
const lite::Tensor& pad_tensor,
@@ -170,15 +170,15 @@ class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, T> {
}
};

template class PaddingLoDTensorFunctor<lite::TargetType::kHost, int>;
template class PaddingLoDTensorFunctor<lite::TargetType::kHost, int64_t>;
template class PaddingLoDTensorFunctor<lite::TargetType::kHost, float>;
template class PaddingLoDTensorFunctor<lite::TargetType::kHost, double>;
template class PaddingDenseTensorFunctor<lite::TargetType::kHost, int>;
template class PaddingDenseTensorFunctor<lite::TargetType::kHost, int64_t>;
template class PaddingDenseTensorFunctor<lite::TargetType::kHost, float>;
template class PaddingDenseTensorFunctor<lite::TargetType::kHost, double>;

template class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, int>;
template class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, int64_t>;
template class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, float>;
template class UnpaddingLoDTensorFunctor<lite::TargetType::kHost, double>;
template class UnpaddingDenseTensorFunctor<lite::TargetType::kHost, int>;
template class UnpaddingDenseTensorFunctor<lite::TargetType::kHost, int64_t>;
template class UnpaddingDenseTensorFunctor<lite::TargetType::kHost, float>;
template class UnpaddingDenseTensorFunctor<lite::TargetType::kHost, double>;

} // namespace math
} // namespace host
9 changes: 5 additions & 4 deletions lite/backends/host/math/sequence_padding.h
@@ -57,7 +57,7 @@ inline static void CheckDims(const lite::DDim& seq_tensor_dims,
}

/*
* \brief Padding/Unpadding LoDTensor to/from normal Tensor of the shape
* \brief Padding/Unpadding DenseTensor to/from normal Tensor of the shape
* [max_sequence_length, num_sequences, sequence_width].
*
* Padding sequence:
@@ -72,7 +72,8 @@ inline static void CheckDims(const lite::DDim& seq_tensor_dims,
* padding (s0, s1, s2, s3; s0, s1, s2, 0; s0, 0, s2, 0; s0, 0, 0, 0)
*
* \param context device context of this functor.
* \param seq LoDTensor which is stored in sequence format, the shape
* \param seq DenseTensor which is stored in sequence format, the
* shape
* is [total_sequence_length, sequence_width] where
* total_sequence_length is the sum of all sequences'
* length.
@@ -83,7 +84,7 @@ inline static void CheckDims(const lite::DDim& seq_tensor_dims,
* \note transposition is also done in this functor.
*/
template <lite::TargetType Target, typename T>
class PaddingLoDTensorFunctor {
class PaddingDenseTensorFunctor {
public:
void operator()(const lite::Context<Target>& context,
const lite::Tensor& seq_tensor,
@@ -96,7 +97,7 @@ class PaddingLoDTensorFunctor {
};

template <lite::TargetType Target, typename T>
class UnpaddingLoDTensorFunctor {
class UnpaddingDenseTensorFunctor {
public:
void operator()(const lite::Context<Target>& context,
const lite::Tensor& pad_tensor,
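As a rough illustration of the padding layout documented in this header, here is a sketch assuming flat row-major storage; PadSequences and its parameters are hypothetical, not the functor's real signature.

#include <algorithm>
#include <cstddef>
#include <vector>

// Scatter a flat [total_sequence_length, width] buffer into a zero-padded
// [max_sequence_length, num_sequences, width] buffer. `lod` holds the
// num_sequences + 1 row offsets, e.g. {0, 3, 4} for lengths {3, 1}.
template <typename T>
std::vector<T> PadSequences(const std::vector<T>& seq,
                            const std::vector<size_t>& lod,
                            size_t width) {
  const size_t num_seqs = lod.size() - 1;
  size_t max_len = 0;
  for (size_t i = 0; i < num_seqs; ++i) {
    max_len = std::max(max_len, lod[i + 1] - lod[i]);
  }
  std::vector<T> pad(max_len * num_seqs * width, T(0));
  for (size_t i = 0; i < num_seqs; ++i) {
    const size_t len = lod[i + 1] - lod[i];
    for (size_t t = 0; t < len; ++t) {
      for (size_t w = 0; w < width; ++w) {
        // Row t of sequence i lands at time-major position [t, i, w].
        pad[(t * num_seqs + i) * width + w] = seq[(lod[i] + t) * width + w];
      }
    }
  }
  return pad;
}

Unpadding is the inverse gather back into the flat sequence buffer.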
@@ -2273,7 +2273,8 @@ typedef enum {
* bounding boxes.
* In this case, input bboxes should be the first case with shape [N,
* M, 4/8/16/24/32].
* 2. A 2-D LoDTensor with shape [M, C]. M is the number of bbox, C is the
* 2. A 2-D DenseTensor with shape [M, C]. M is the number of bbox, C is
* the
* class number.
* In this case, input bboxes should be the second case with shape [M,
* C, 4].
2 changes: 1 addition & 1 deletion lite/backends/x86/math/context_project.h
@@ -48,7 +48,7 @@ namespace math {
* For a mini-batch of 2 variable-length sentences, containing 3 and 1
* time-steps:
*
* Assumed input (X) is a [4, M, N] float LoDTensor, and X->lod()[0] = [0, 3,
* Assumed input (X) is a [4, M, N] float DenseTensor, and X->lod()[0] = [0, 3,
* 4].
* Besides, for the sake of simplicity, we assume M=1 and N=2.
*
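For readers decoding LoD notation like X->lod()[0] = [0, 3, 4] above, a minimal sketch (hypothetical helper, not part of the lite API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Decode one LoD level into sequence lengths: adjacent offsets delimit a
// sequence, so X->lod()[0] = {0, 3, 4} describes two sequences of
// lengths 3 and 1, exactly the mini-batch in the comment above.
std::vector<uint64_t> SequenceLengths(const std::vector<uint64_t>& level) {
  std::vector<uint64_t> lengths;
  for (size_t i = 1; i < level.size(); ++i) {
    lengths.push_back(level[i] - level[i - 1]);
  }
  return lengths;
}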
8 changes: 4 additions & 4 deletions lite/backends/x86/math/sequence2batch.cc
@@ -54,10 +54,10 @@ class CopyMatrixRowsFunctor<lite::TargetType::kX86, T> {
template class CopyMatrixRowsFunctor<lite::TargetType::kX86, float>;
template class CopyMatrixRowsFunctor<lite::TargetType::kX86, double>;

template class LoDTensor2BatchFunctor<lite::TargetType::kX86, float>;
template class LoDTensor2BatchFunctor<lite::TargetType::kX86, double>;
template class Batch2LoDTensorFunctor<lite::TargetType::kX86, float>;
template class Batch2LoDTensorFunctor<lite::TargetType::kX86, double>;
template class DenseTensor2BatchFunctor<lite::TargetType::kX86, float>;
template class DenseTensor2BatchFunctor<lite::TargetType::kX86, double>;
template class Batch2DenseTensorFunctor<lite::TargetType::kX86, float>;
template class Batch2DenseTensorFunctor<lite::TargetType::kX86, double>;

} // namespace math
} // namespace x86
14 changes: 7 additions & 7 deletions lite/backends/x86/math/sequence2batch.h
@@ -42,7 +42,7 @@ class CopyMatrixRowsFunctor {
};

template <lite::TargetType Target, typename T>
class LoDTensor2BatchFunctor {
class DenseTensor2BatchFunctor {
// Calculate the length of each sequence and
// sort sequence index by the length.
// example: sequences = {s0, s1, s2}
@@ -67,7 +67,7 @@ class LoDTensor2BatchFunctor {
if (!is_cal_batch_lod) {
auto lods = batch->lod();
CHECK_GT(lods.size(), 2UL)
<< "The LoD of LoDTensor should inlcude at least 2-level "
<< "The LoD of DenseTensor should inlcude at least 2-level "
"sequence information.";
CHECK_EQ(lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0]))
<< "The LoD information should be consistent with the dims.";
@@ -119,12 +119,12 @@ class LoDTensor2BatchFunctor {
LoD* batch_lods = batch->mutable_lod();
batch_lods->resize(3);

// batch_lods[0] is the start positions for batch LoDTensor
// batch_lods[0] is the start positions for batch DenseTensor
int max_seqlen = seq_info[0].length;
batch_lods->at(0).resize(static_cast<size_t>(max_seqlen + 1));
// batch_lods[1] is the raw index in the input LoDTensor
// batch_lods[1] is the raw index in the input DenseTensor
batch_lods->at(1).resize(static_cast<size_t>(lod_tensor.dims()[0]));
// batch_lods[2] is the sort order for the input LoDTensor.
// batch_lods[2] is the sort order for the input DenseTensor.
batch_lods->at(2).resize(seq_info.size());

auto* batch_starts = batch_lods->at(0).data();
@@ -156,14 +156,14 @@ class LoDTensor2BatchFunctor {
};

template <lite::TargetType Target, typename T>
class Batch2LoDTensorFunctor {
class Batch2DenseTensorFunctor {
public:
void operator()(const lite::Context<Target>& context,
const lite::Tensor& batch,
lite::Tensor* lod_tensor) const {
auto in_lod = batch.lod();
CHECK_GT(in_lod.size(), 2UL)
<< "The LoD of LoDTensor should inlcude at least 2-level "
<< "The LoD of DenseTensor should inlcude at least 2-level "
"sequence information.";
CHECK_EQ(in_lod[1].size(), static_cast<size_t>(lod_tensor->dims()[0]))
<< "The LoD information should be consistent with the dims.";
4 changes: 2 additions & 2 deletions lite/backends/x86/math/sequence_scale.cc
@@ -21,7 +21,7 @@ namespace x86 {
namespace math {

template <typename T>
class ScaleLoDTensorFunctor<lite::TargetType::kX86, T> {
class ScaleDenseTensorFunctor<lite::TargetType::kX86, T> {
public:
void operator()(const lite::Context<lite::TargetType::kX86>& context,
const T* scales,
@@ -43,7 +43,7 @@ class ScaleLoDTensorFunctor<lite::TargetType::kX86, T> {
}
};

template class ScaleLoDTensorFunctor<lite::TargetType::kX86, float>;
template class ScaleDenseTensorFunctor<lite::TargetType::kX86, float>;

} // namespace math
} // namespace x86
5 changes: 3 additions & 2 deletions lite/backends/x86/math/sequence_scale.h
@@ -36,7 +36,8 @@ namespace math {

*
* \param context Device context of this functor.
* \param seq LoDTensor which is stored in sequence format, the shape
* \param seq DenseTensor which is stored in sequence format, the
shape
* is [total_sequence_length, sequence_width] where
* total_sequence_length is the sum of all sequences'
* length.
@@ -46,7 +47,7 @@ namespace math {
*/

template <lite::TargetType Target, typename T>
class ScaleLoDTensorFunctor {
class ScaleDenseTensorFunctor {
public:
void operator()(const lite::Context<Target>& context,
const T* scales,
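A minimal sketch of the per-sequence scaling this header declares, under the assumption that every row of sequence i in the flat [total_sequence_length, sequence_width] buffer is multiplied by scales[i]; ScaleSequences and its parameters are illustrative only.

#include <cstddef>
#include <vector>

// `lod` holds the num_sequences + 1 row offsets of the flat buffer, and
// `data` points at total_sequence_length * width contiguous elements.
template <typename T>
void ScaleSequences(const T* scales,
                    const std::vector<size_t>& lod,
                    size_t width,
                    T* data) {
  for (size_t i = 0; i + 1 < lod.size(); ++i) {
    // Scale every element of sequence i by its own factor.
    for (size_t e = lod[i] * width; e < lod[i + 1] * width; ++e) {
      data[e] *= scales[i];
    }
  }
}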
8 changes: 4 additions & 4 deletions lite/kernels/arm/gru_compute.cc
@@ -83,7 +83,7 @@ void GRUComputeRun(const operators::GRUParam& param,
bit_length = param.bit_length;
}

lite::arm::math::LoDTensor2BatchFunctor<float> to_batch;
lite::arm::math::DenseTensor2BatchFunctor<float> to_batch;
to_batch(*input, batch_gate, true, param.is_reverse);

if (bias) {
@@ -164,7 +164,7 @@ void GRUComputeRun(const operators::GRUParam& param,
}
gru_value.prev_out_value = gru_value.output_value;
}
lite::arm::math::Batch2LoDTensorFunctor<float> to_seq;
lite::arm::math::Batch2DenseTensorFunctor<float> to_seq;
*(batch_hidden->mutable_lod()) = batch_gate->lod();
to_seq(*batch_hidden, hidden);
}
@@ -211,7 +211,7 @@ void GRUCompute<PRECISION(kFP16)>::Run() {
hidden->mutable_data<float16_t>();
memset(batch_gate_data, 0, batch_gate->numel() * sizeof(float16_t));

lite::arm::math::LoDTensor2BatchFunctor<float16_t> to_batch;
lite::arm::math::DenseTensor2BatchFunctor<float16_t> to_batch;
to_batch(*input, batch_gate, true, param.is_reverse);

if (bias) {
@@ -267,7 +267,7 @@ void GRUCompute<PRECISION(kFP16)>::Run() {

gru_value.prev_out_value = gru_value.output_value;
}
lite::arm::math::Batch2LoDTensorFunctor<float16_t> to_seq;
lite::arm::math::Batch2DenseTensorFunctor<float16_t> to_seq;
*(batch_hidden->mutable_lod()) = batch_gate->lod();
to_seq(*batch_hidden, hidden);
}
4 changes: 2 additions & 2 deletions lite/kernels/arm/lstm_compute.cc
@@ -57,7 +57,7 @@ void LSTMComputeRun(const operators::LstmParam& param,
memset(cell_ptr, 0, cell_out->numel() * sizeof(float));

bool is_reverse = param.is_reverse;
lite::arm::math::LoDTensor2BatchFunctor<float> to_batch;
lite::arm::math::DenseTensor2BatchFunctor<float> to_batch;
to_batch(*input, batch_gate, true, is_reverse);

auto in_dims = input->dims();
@@ -247,7 +247,7 @@ void LSTMComputeRun(const operators::LstmParam& param,
lstm_value.prev_state_value = lstm_value.state_value;
}

lite::arm::math::Batch2LoDTensorFunctor<float> to_seq;
lite::arm::math::Batch2DenseTensorFunctor<float> to_seq;
auto* lod_hidden = batch_hidden.mutable_lod();
*lod_hidden = batch_gate->lod();
to_seq(batch_hidden, hidden_out);
2 changes: 1 addition & 1 deletion lite/kernels/arm/slice_compute.cc
@@ -28,7 +28,7 @@ void DealTensorArray(const operators::SliceParam& param,
const std::vector<int64_t>& ends,
bool out_is_array) {
auto in_array = param.XTensorList;
// If the input is LoDTensorArray, the rank of input is 1.
// If the input is DenseTensorArray, the rank of input is 1.
int64_t in_size = in_array->size();
int64_t start = starts[0] < 0 ? (starts[0] + in_size) : starts[0];
int64_t end = ends[0] < 0 ? (ends[0] + in_size) : ends[0];
38 changes: 19 additions & 19 deletions lite/kernels/host/beam_search_decode_compute.cc
@@ -21,8 +21,8 @@ namespace lite {
namespace kernels {
namespace host {

using LoDTensor = lite::Tensor;
using LoDTensorArray = std::vector<lite::Tensor>;
using DenseTensor = lite::Tensor;
using DenseTensorArray = std::vector<lite::Tensor>;

// all the lod have 2 levels.
// The first is source level, the second is sentence level.
@@ -52,15 +52,15 @@ struct BeamSearchDecoder {
* with word score.
* Param:
* sentence_vector_list: sentence_vector for each source sentence.
* id_tensor: result LoDTensor for sentences of id.
* score_tensor: result LoDTensor for sentences of score.
* id_tensor: result DenseTensor for sentences of id.
* score_tensor: result DenseTensor for sentences of score.
* reverse: whether ids of sentence in sentence_vector_list is reversed
* sort_by_score: whether to sort hypotheses of each sentence by scores.
*/
void ConvertSentenceVectorToLodTensor(
std::vector<SentenceVector<T>> sentence_vector_list,
LoDTensor* id_tensor,
LoDTensor* score_tensor,
DenseTensor* id_tensor,
DenseTensor* score_tensor,
bool reverse = true,
bool sort_by_score = true) const {
size_t src_num = sentence_vector_list.size();
@@ -129,12 +129,12 @@ struct BeamSearchDecoder {

/**
* Gather the hypotheses for each source sentence by backtrace through the
* LoDTensorArray step_ids whose lods preserve the path in the tree.
* DenseTensorArray step_ids whose lods preserve the path in the tree.
*/
void Backtrace(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores,
LoDTensor* id_tensor,
LoDTensor* score_tensor) const {
void Backtrace(const DenseTensorArray& step_ids,
const DenseTensorArray& step_scores,
DenseTensor* id_tensor,
DenseTensor* score_tensor) const {
CHECK(!step_ids.empty()) << "step num should be larger than 0";
CHECK_EQ(step_ids.size(), step_scores.size())
<< "step_ids and step_scores should be the same";
@@ -211,10 +211,10 @@ struct BeamSearchDecodeFunctor {
};

struct BeamSearchDecodeFunctor {
BeamSearchDecodeFunctor(const LoDTensorArray& step_ids,
const LoDTensorArray& step_scores,
LoDTensor* id_tensor,
LoDTensor* score_tensor,
BeamSearchDecodeFunctor(const DenseTensorArray& step_ids,
const DenseTensorArray& step_scores,
DenseTensor* id_tensor,
DenseTensor* score_tensor,
size_t beam_size,
int end_id)
: beam_size_(beam_size),
@@ -233,10 +233,10 @@ struct BeamSearchDecodeFunctor {

size_t beam_size_;
int end_id_;
const LoDTensorArray& step_ids_;
const LoDTensorArray& step_scores_;
LoDTensor* id_tensor_;
LoDTensor* score_tensor_;
const DenseTensorArray& step_ids_;
const DenseTensorArray& step_scores_;
DenseTensor* id_tensor_;
DenseTensor* score_tensor_;
};

template <>
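The two-level LoD convention stated at the top of this file (source level first, sentence level second) can be made concrete with a small sketch; the values are made up, not taken from the kernel.

#include <cstddef>
#include <vector>

int main() {
  // Source level: source 0 kept hypotheses [0, 2), source 1 kept [2, 3).
  std::vector<size_t> source_level = {0, 2, 3};
  // Sentence level: word-offset boundaries of the three hypotheses in the
  // flat id tensor, i.e. lengths 4, 3, and 2.
  std::vector<size_t> sentence_level = {0, 4, 7, 9};
  // Hypothesis j of source i spans ids in
  // [sentence_level[source_level[i] + j],
  //  sentence_level[source_level[i] + j + 1]).
  return 0;
}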