Skip to content

Commit

Permalink
[TensorV2] Completed integration of remaining functions from Tensor
Browse files Browse the repository at this point in the history
This commit integrated all remaining functions from Tensor class into TensorV2.
This includes fill(), setData(), setValueInt(), sin(), and cos().

Signed-off-by: Donghyeon Jeong <dhyeon.jeong@samsung.com>
  • Loading branch information
djeong20 authored and jijoongmoon committed Mar 14, 2024
1 parent 6b488d7 commit 6c8acb7
Show file tree
Hide file tree
Showing 6 changed files with 164 additions and 6 deletions.
18 changes: 18 additions & 0 deletions nntrainer/tensor/float_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -704,6 +704,24 @@ TensorV2 &FloatTensor::erf(TensorV2 &output) const {
return output;
}

void FloatTensor::sin(TensorV2 &out, float alpha) {
  if (contiguous) {
    // Fast path: run the vectorized sine kernel over the raw flat buffer.
    sine(size(), (float *)getData(), out.getData<float>(), alpha);
  } else {
    // Strided/non-contiguous layout: fall back to an element-wise apply.
    auto sin_op = [alpha](float value) -> float {
      return std::sin(alpha * value);
    };
    apply(sin_op, out);
  }
}

void FloatTensor::cos(TensorV2 &out, float alpha) {
  if (contiguous) {
    // Fast path: run the vectorized cosine kernel over the raw flat buffer.
    sine:; // placeholder removed
  }
}

TensorV2 &FloatTensor::dot(TensorV2 const &input, TensorV2 &output, bool trans,
bool trans_in, float beta) const {
// Comment out with intension to support the calculation wrt. batch and height
Expand Down
10 changes: 10 additions & 0 deletions nntrainer/tensor/float_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,16 @@ class FloatTensor : public TensorBase {
*/
TensorV2 &erf(TensorV2 &output) const override;

/**
* @copydoc TensorV2::sin(TensorV2 &out, float alpha)
*/
void sin(TensorV2 &out, float alpha) override;

/**
* @copydoc TensorV2::cos(TensorV2 &out, float alpha)
*/
void cos(TensorV2 &out, float alpha) override;

/**
* @copydoc TensorV2::dot(TensorV2 const &input, TensorV2 &output, bool
* trans, bool trans_in, float beta)
Expand Down
11 changes: 11 additions & 0 deletions nntrainer/tensor/tensor_base.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,17 @@ void TensorBase::putData() const {
data->invalidate();
}

void TensorBase::setMemoryData(const std::shared_ptr<MemoryData> buf,
                               size_t off) {
  // Adopt `buf` as this tensor's backing storage. A null buffer detaches
  // the tensor, in which case the stored offset is reset to zero.
  data = buf;
  offset = buf ? off : 0;
}

// Expose the shared memory buffer backing this tensor (null when detached).
const std::shared_ptr<MemoryData> TensorBase::getMemoryData() const {
return data;
}
Expand Down
25 changes: 25 additions & 0 deletions nntrainer/tensor/tensor_base.h
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,24 @@ class TensorBase {
*/
virtual TensorV2 &erf(TensorV2 &output) const = 0;

/**
* @brief sin transform function
* @param[out] out out to store the result
* @param[in] alpha scaling factor applied to each element before taking sin
* @throws std::invalid_argument always, in this base implementation;
* tensor types that support the operation override this (e.g. FloatTensor)
*/
virtual void sin(TensorV2 &out, float alpha = 1.0) {
throw std::invalid_argument(
"Tensor::sin not supported in current tensor data type.");
}

/**
* @brief cos transform function
* @param[out] out out to store the result
* @param[in] alpha scaling factor applied to each element before taking cos
* @throws std::invalid_argument always, in this base implementation;
* tensor types that support the operation override this (e.g. FloatTensor)
*/
virtual void cos(TensorV2 &out, float alpha = 1.0) {
throw std::invalid_argument(
"Tensor::cos not supported in current tensor data type.");
}

/**
* @brief Dot Product of Tensor ( equal MxM )
* @details This applies dot of the last dimension of this and
Expand Down Expand Up @@ -411,6 +429,13 @@ class TensorBase {
*/
void putData() const;

/**
* @brief Set the memory buffer for the tensor
* @param buf the memory buffer
* @param off offset
*/
void setMemoryData(const std::shared_ptr<MemoryData> buf, size_t off);

/**
* @brief return Data pointer of Tensor
* @retval template T pointer (float pointer as default)
Expand Down
48 changes: 48 additions & 0 deletions nntrainer/tensor/tensor_v2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -523,6 +523,20 @@ TensorV2 &TensorV2::erf(TensorV2 &output) const {
return output;
}

void TensorV2::sin(TensorV2 &out, float alpha) {
  // The destination must hold exactly as many elements as this tensor.
  if (out.size() != size()) {
    throw std::invalid_argument("Error: Size of out of Tensor::sin must match");
  }

  // Delegate to the concrete tensor implementation.
  itensor->sin(out, alpha);
}

void TensorV2::cos(TensorV2 &out, float alpha) {
  // The destination must hold exactly as many elements as this tensor.
  if (out.size() != size()) {
    throw std::invalid_argument("Error: Size of out of Tensor::cos must match");
  }

  // Delegate to the concrete tensor implementation.
  itensor->cos(out, alpha);
}

// Delegate the l2 (Euclidean) norm computation to the concrete tensor.
float TensorV2::l2norm() const { return itensor->l2norm(); }

void TensorV2::normalization_i() {
Expand Down Expand Up @@ -787,6 +801,15 @@ void TensorV2::print(std::ostream &out) const { itensor->print(out); }

// Release/invalidate the data handle via the underlying tensor implementation.
void TensorV2::putData() const { itensor->putData(); }

void TensorV2::setData(const std::shared_ptr<MemoryData> buf, size_t off,
                       bool init) {
  // Hand the buffer (and offset within it) to the concrete implementation.
  itensor->setMemoryData(buf, off);

  // Initialize only when a real buffer was attached and the caller asked.
  if (init && buf) {
    initialize();
  }
}

// Expose the shared memory buffer backing this tensor (may be null).
const std::shared_ptr<MemoryData> TensorV2::getMemoryData() const {
return itensor->getMemoryData();
}
Expand Down Expand Up @@ -930,6 +953,31 @@ TensorV2 &TensorV2::transpose(const std::string &direction,

// Reinterpret the element layout with a new dimension; delegated to itensor.
void TensorV2::reshape(const TensorDim &d) { itensor->reshape(d); }

void TensorV2::fill(const TensorV2 &from, bool allocate) {
// An empty tensor with allocate=true is populated by a full copy
// (dimension, stride and data all come from `from`).
if (allocate && this->empty()) {
this->copy(from);
return;
}

// NOTE: the checks below are order-sensitive — callers may depend on
// which exception type is thrown first.
if (!from.getContiguous() || !getContiguous()) {
/// @todo enable this if needed
throw nntrainer::exception::not_supported(
"[Tensor::fill] non-contiguous tensors are not supported");
}

if (getDim() != from.getDim()) {
throw std::invalid_argument("[Tensor::fill] dimension must be the same");
}

if (getStrides() != from.getStrides()) {
/// @todo length does not represent buffer size, there should be way to
/// get the buffer size
throw std::invalid_argument("[Tensor::fill] buffer size must be the same");
}

// Same dim, strides and contiguity: a raw element-wise copy is safe.
copyData(from);
}

// Return a copy of this tensor's dimension descriptor.
TensorDim TensorV2::getDim() const { return itensor->getDim(); }

TensorDim::TensorType TensorV2::getTensorType() const {
Expand Down
58 changes: 52 additions & 6 deletions nntrainer/tensor/tensor_v2.h
Original file line number Diff line number Diff line change
Expand Up @@ -295,29 +295,31 @@ class TensorV2 {
* @brief return Data pointer of TensorV2
* @retval template T pointer
*/
template <typename T> T *getData() const { return (T *)itensor->getData(); }
template <typename T = float> T *getData() const {
return (T *)itensor->getData();
}

/**
* @brief return Data pointer of TensorV2
* @retval template T pointer
*/
template <typename T> T *getData(size_t idx) const {
template <typename T = float> T *getData(size_t idx) const {
return (T *)itensor->getData(idx);
}

/**
* @brief i data index
* @retval template T pointer (address of ith data)
*/
template <typename T> T *getAddress(unsigned int i) {
template <typename T = float> T *getAddress(unsigned int i) {
return (T *)itensor->getAddress(i);
}

/**
* @brief i data index
* @retval template T pointer (address of ith data)
*/
template <typename T> const T *getAddress(unsigned int i) const {
template <typename T = float> const T *getAddress(unsigned int i) const {
return (T *)itensor->getAddress(i);
}

Expand Down Expand Up @@ -399,6 +401,18 @@ class TensorV2 {
void setValue(unsigned int b, unsigned int c, unsigned int h, unsigned int w,
float value);

/**
* @brief Set the element value
* @param[in] offset offset from start location
* @param[in] value value to be stored
*
* @todo This is a temporary workaround. Remove this
*/
void setValueInt(unsigned int offset, int value) noexcept {
// NOTE(review): reinterprets the underlying buffer as int via a C-style
// cast — only valid while the temporary int-typed usage remains (see @todo).
int *data_int = (int *)getData();
data_int[offset] = value;
}

/**
* @brief add the element value to the location
* @param[in] b batch location
Expand Down Expand Up @@ -918,6 +932,18 @@ class TensorV2 {
*/
TensorV2 &erf(TensorV2 &output) const;

/**
* @brief sin transform function
* @param[out] out out to store the result
* @param[in] alpha scaling factor applied to each element before taking sin
*/
void sin(TensorV2 &out, float alpha = 1.0);

/**
* @brief cos transform function
* @param[out] out out to store the result
* @param[in] alpha scaling factor applied to each element before taking cos
*/
void cos(TensorV2 &out, float alpha = 1.0);

/**
* @brief l2norm the Tensor elements
* @retval Calculated l2norm
Expand Down Expand Up @@ -1111,6 +1137,15 @@ class TensorV2 {
*/
void putData() const;

/**
* @brief Set the memory buffer for the tensor
*
* @param buf the memory buffer
* @param off offset from the start of the buffer
* @param init initialize the buffer
*/
void setData(const std::shared_ptr<MemoryData> buf, size_t off = 0,
bool init = false);

/**
* @brief return Data pointer of Tensor
* @retval template T pointer (float pointer as default)
Expand Down Expand Up @@ -1219,6 +1254,17 @@ class TensorV2 {
*/
void reshape(const TensorDim &d);

/**
* @brief fill tensor data with current value,
* if dimension is not exactly same, it is a hard error in this function
* so, only stride is overridden to @a this
*
* @param from Tensor to fill the data from
* @param allocate if unallocated, allocate with from.getDim()
* @throws std::invalid_argument if dimension and stride does not match
*/
void fill(const TensorV2 &from, bool allocate = false);

/**
* @brief return a copy of the Tensor Dim
* @retval TensorDim
Expand Down Expand Up @@ -1385,8 +1431,8 @@ class TensorV2 {
* tensor.
*/
TensorV2 getSharedDataTensor(const TensorDim dim_, size_t offset,
bool reset_stride,
const std::string &name_) const;
bool reset_stride = true,
const std::string &name_ = "") const;

/**
* @brief Swaps Tensor lhs and rhs
Expand Down

0 comments on commit 6c8acb7

Please sign in to comment.