diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 31adbd5..eaf6a59 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,6 +15,7 @@ jobs: submodules: recursive - name: Install dependencies run: | + sudo apt-get update sudo apt-get install -y cmake ninja-build ccache scons - name: ccache uses: hendrikmuhs/ccache-action@v1.2 @@ -37,6 +38,7 @@ jobs: submodules: recursive - name: Install dependencies run: | + sudo apt-get update sudo apt-get install -y cmake ninja-build ccache scons - name: ccache uses: hendrikmuhs/ccache-action@v1.2 @@ -81,6 +83,7 @@ jobs: submodules: true - name: Install dependencies run: | + sudo apt-get update sudo apt-get install -y cmake ninja-build ccache gcovr lcov scons - uses: actions/checkout@v4 with: @@ -102,7 +105,7 @@ jobs: cmake --build build --parallel - name: Test run: | - build/bin/run_tests + build/test/run_test env: CTEST_OUTPUT_ON_FAILURE: 1 - name: Generate lcov Coverage Data diff --git a/CMakeLists.txt b/CMakeLists.txt index a8e8360..22307d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,15 +1,30 @@ cmake_minimum_required(VERSION 3.20) +set(CMAKE_CXX_STANDARD 11) -project(cpp_template) - -include(cmake/configure.cmake) +set(ProjectName "itlab") +project(${ProjectName}) include_directories(include) enable_testing() -add_subdirectory(3rdparty) -add_subdirectory(app) -add_subdirectory(include) +include(FetchContent) +FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v1.14.0 +) +FetchContent_MakeAvailable(googletest) + + +#add_subdirectory(3rdparty/googletest) add_subdirectory(src) add_subdirectory(test) + +# REPORT +message( STATUS "") +message( STATUS "General configuration for ${PROJECT_NAME}") +message( STATUS "======================================") +message( STATUS "") +message( STATUS " Configuration: ${CMAKE_BUILD_TYPE}") +message( STATUS "") \ No newline at end of file diff --git a/include/graph/graph.h 
b/include/graph/graph.h new file mode 100644 index 0000000..dacaafe --- /dev/null +++ b/include/graph/graph.h @@ -0,0 +1,41 @@ +#ifndef GRAPH_H +#define GRAPH_H + +#include <string> +#include <unordered_map> +#include <vector> + +#include "./layer/layer.h" +#include "./tensor/tensor.h" + +class Network { + private: + std::unordered_map<int, Layer*> layers_; + Tensor<double> inputTensor_; + Tensor<double>* outputTensor_; + int start_ = -1; + int end_ = -1; + bool bfs_helper(int start, int vert, bool flag, + std::vector<int>* v_ord) const; + + public: + Network(); + + bool addLayer(Layer& lay, const std::vector<int>& inputs = {}, + const std::vector<int>& outputs = {}); + void addEdge(Layer& layPrev, Layer& layNext); + void removeEdge(Layer& layPrev, Layer& layNext); + void removeLayer(Layer& lay); + int getLayers() const; + int getEdges() const; + bool isEmpty() const; + bool hasPath(Layer& layPrev, Layer& layNext) const; + std::vector<int> inference(int start) const; + void setInput(Layer& lay, Tensor<double>& vec); + void setOutput(Layer& lay, Tensor<double>& vec); + void run(); + std::vector<std::string> getLayersTypeVector() const; + ~Network(); +}; + +#endif diff --git a/include/layer/ConcatenateLayer.h b/include/layer/ConcatenateLayer.h new file mode 100644 index 0000000..c3a428d --- /dev/null +++ b/include/layer/ConcatenateLayer.h @@ -0,0 +1,30 @@ +#ifndef CONCATENATE_LAYER_H +#define CONCATENATE_LAYER_H + +#include <string> +#include <vector> + +#include "layer/layer.h" +#include "tensor/tensor.h" + +class ConcatenateLayerMock : public Layer { + private: + std::vector<Shape> input_shapes_config_; + Shape output_shape_computed_; + unsigned int concatenation_axis_; + bool configured_ = false; + + public: + explicit ConcatenateLayerMock(int id); + + void configure(const std::vector<Shape>& inputs_shapes, unsigned int axis, + Shape& output_shape_ref); + + void exec(const Tensor<double>& input, Tensor<double>& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/ConvLayer.h b/include/layer/ConvLayer.h 
new file mode 100644 index 0000000..b12b669 --- /dev/null +++ b/include/layer/ConvLayer.h @@ -0,0 +1,44 @@ +#ifndef CONV_LAYER_H +#define CONV_LAYER_H + +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +struct ConvPadStrideInfo { + unsigned int stride_x{1}; + unsigned int stride_y{1}; + unsigned int pad_x{0}; + unsigned int pad_y{0}; + + ConvPadStrideInfo(unsigned int sx = 1, unsigned int sy = 1, + unsigned int px = 0, unsigned int py = 0) + : stride_x(sx), stride_y(sy), pad_x(px), pad_y(py) {} +}; + +class ConvolutionLayerMock : public Layer { + private: + ConvPadStrideInfo conv_info_; + Shape input_shape_config_; + Shape weights_shape_config_; + Shape biases_shape_config_; + Shape output_shape_computed_; + bool has_biases_ = false; + bool configured_ = false; + + public: + explicit ConvolutionLayerMock(int id); + + void configure(const Shape& input_s, const Shape& weights_s, + const Shape* biases_s, Shape& output_s_ref, + const ConvPadStrideInfo& info); + + void exec(const Tensor& input, Tensor& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/ElementwiseLayer.h b/include/layer/ElementwiseLayer.h new file mode 100644 index 0000000..ad97c95 --- /dev/null +++ b/include/layer/ElementwiseLayer.h @@ -0,0 +1,39 @@ +#ifndef ELEMENTWISE_LAYER_H +#define ELEMENTWISE_LAYER_H + +#include +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +enum class ElementwiseOp : std::uint8_t { + kAdd, + kSub, + kMul, + kDiv, + kMax, + kMin, + kSquaredDiff +}; + +class ElementwiseLayerMock : public Layer { + private: + ElementwiseOp op_type_; + Shape common_shape_; + bool configured_ = false; + + public: + ElementwiseLayerMock(int id, ElementwiseOp op); + + void configure(const Shape& input1_shape, const Shape& input2_shape, + Shape& output_shape_ref); + + void exec(const Tensor& input, Tensor& output) override; + + Shape 
get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/MatMulLayer.h b/include/layer/MatMulLayer.h new file mode 100644 index 0000000..399035a --- /dev/null +++ b/include/layer/MatMulLayer.h @@ -0,0 +1,35 @@ +#ifndef MATMUL_LAYER_H +#define MATMUL_LAYER_H + +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +struct MatMulInfo { + bool transpose_x{false}; + bool transpose_y{false}; +}; + +class MatMulLayerMock : public Layer { + private: + MatMulInfo matmul_info_; + Shape input_x_shape_; + Shape input_y_shape_; + Shape output_shape_; + bool configured_ = false; + + public: + MatMulLayerMock(int id, const MatMulInfo& info); + + void configure(const Shape& input_x_shape, const Shape& input_y_shape, + Shape& output_shape_ref); + + void exec(const Tensor& input_x, Tensor& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/PoolingLayer.h b/include/layer/PoolingLayer.h new file mode 100644 index 0000000..950a20c --- /dev/null +++ b/include/layer/PoolingLayer.h @@ -0,0 +1,45 @@ +#ifndef POOLING_LAYER_H +#define POOLING_LAYER_H + +#include +#include +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +enum class PoolingType : std::uint8_t { kMax, kAvg, kL2 }; + +struct PoolingLayerInfo { + PoolingType pool_type{PoolingType::kMax}; + int pool_size_x{2}; + int pool_size_y{2}; + int stride_x{1}; + int stride_y{1}; + int pad_x{0}; + int pad_y{0}; + bool exclude_padding{true}; +}; + +class PoolingLayerMock : public Layer { + private: + PoolingLayerInfo pool_info_; + Shape input_shape_; + Shape output_shape_; + size_t h_in_idx_ = 0; + size_t w_in_idx_ = 0; + bool configured_ = false; + + public: + PoolingLayerMock(int id, const PoolingLayerInfo& info); + + void configure(const Shape& input_shape, Shape& output_shape_ref); + + void 
exec(const Tensor& input, Tensor& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/ReshapeLayer.h b/include/layer/ReshapeLayer.h new file mode 100644 index 0000000..e72addd --- /dev/null +++ b/include/layer/ReshapeLayer.h @@ -0,0 +1,28 @@ +#ifndef RESHAPE_LAYER_H +#define RESHAPE_LAYER_H + +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +class ReshapeLayerMock : public Layer { + private: + Shape input_shape_config_; + Shape target_output_shape_config_; + bool configured_ = false; + + public: + explicit ReshapeLayerMock(int id); + + void configure(const Shape& input_shape, const Shape& target_output_shape, + Shape& output_shape_ref); + + void exec(const Tensor& input, Tensor& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/SliceLayer.h b/include/layer/SliceLayer.h new file mode 100644 index 0000000..81a4ade --- /dev/null +++ b/include/layer/SliceLayer.h @@ -0,0 +1,31 @@ +#ifndef SLICE_LAYER_H +#define SLICE_LAYER_H + +#include +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +class SliceLayerMock : public Layer { + private: + Shape input_shape_config_; + Shape output_shape_computed_; + std::vector slice_starts_; + std::vector slice_sizes_; + bool configured_ = false; + + public: + explicit SliceLayerMock(int id); + + void configure(const Shape& input_shape, const std::vector& starts, + const std::vector& sizes, Shape& output_shape_ref); + + void exec(const Tensor& input, Tensor& output) override; + + Shape get_output_shape() override; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/SplitLayer.h b/include/layer/SplitLayer.h new file mode 100644 index 0000000..183079c --- /dev/null +++ b/include/layer/SplitLayer.h 
@@ -0,0 +1,33 @@ +#ifndef SPLIT_LAYER_H +#define SPLIT_LAYER_H + +#include +#include + +#include "layer/layer.h" +#include "tensor/tensor.h" + +class SplitLayerMock : public Layer { + private: + Shape input_shape_config_; + std::vector output_shapes_computed_; + unsigned int split_axis_; + unsigned int num_splits_; + bool configured_ = false; + + public: + explicit SplitLayerMock(int id); + + void configure(const Shape& input_shape, unsigned int axis, + unsigned int num_splits, Shape& first_output_shape_ref); + + void exec(const Tensor& input, Tensor& output) override; + + Shape get_output_shape() override; + + const std::vector& get_all_split_output_shapes() const; + + std::string get_type_name() const override; +}; + +#endif \ No newline at end of file diff --git a/include/layer/layer.h b/include/layer/layer.h new file mode 100644 index 0000000..f561824 --- /dev/null +++ b/include/layer/layer.h @@ -0,0 +1,32 @@ +#ifndef LAYER_H +#define LAYER_H + +#include +#include + +#include "./tensor/tensor.h" + +struct LayerAttributes { + int id = -1; +}; + +class Layer { + protected: + int id_; + + public: + Layer() = default; + explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {} + virtual ~Layer() = default; + void setID(int id) { id_ = id; } + int getID() const { return id_; } + virtual std::string getInfoString() const; + virtual void exec(const Tensor& input, Tensor& output) = 0; + virtual Shape get_output_shape() = 0; + + virtual std::string get_type_name() const = 0; + void addNeighbor(Layer* neighbor); + void removeNeighbor(Layer* neighbor); + std::list neighbors_; +}; +#endif \ No newline at end of file diff --git a/include/tensor/tensor.h b/include/tensor/tensor.h new file mode 100644 index 0000000..c4143d3 --- /dev/null +++ b/include/tensor/tensor.h @@ -0,0 +1,105 @@ +#ifndef TENSOR_H +#define TENSOR_H + +#include +#include +#include +#include +#include +#include + +struct Shape { + std::vector dimensions; + size_t total_elements; + + Shape() : 
total_elements(0) {} + Shape(std::vector<size_t> dims); + size_t get_rank() const; +}; + +enum Layout : std::uint8_t { kNchw, kNhwc, kNd }; + +template <typename T> +class Tensor { + public: + Shape shape; + Layout layout; + std::vector<T> data; + + Tensor() : layout(Layout::kNd), data() {} + Tensor(const Shape &sh, Layout l = Layout::kNd); + Tensor(std::vector<size_t> dims, Layout l = Layout::kNd); + size_t get_linear_index(const std::vector<size_t> &indices) const; + T &at(const std::vector<size_t> &indices); + const T &at(const std::vector<size_t> &indices) const; +}; + +template <typename T> +Tensor<T>::Tensor(const Shape &sh, Layout l) + : shape(sh), layout(l), data(sh.total_elements) {} + +template <typename T> +Tensor<T>::Tensor(std::vector<size_t> dims, Layout l) + : Tensor(Shape(std::move(dims)), l) {} + +template <typename T> +size_t Tensor<T>::get_linear_index(const std::vector<size_t> &indices) const { + if (indices.size() != shape.get_rank()) { + throw std::runtime_error("Incorrect number of indices provided."); + } + for (size_t i = 0; i < indices.size(); ++i) { + if (indices[i] >= shape.dimensions[i]) { + std::string error_msg = "Index out of range for dimension "; + throw std::out_of_range(error_msg); + } + } + + size_t linear_index = 0; + size_t n = shape.get_rank(); + + if (n == 0) { + if (shape.total_elements == 1 && indices.empty()) { + return 0; + } + if (shape.total_elements == 0 && indices.empty()) { + return 0; + } + throw std::logic_error("Invalid access to rank-0 tensor or empty tensor."); + } + + if (n == 4 && layout == Layout::kNhwc) { + if (shape.dimensions.size() != 4) { + throw std::logic_error( + "kNhwc layout is specified for a tensor not of rank 4."); + } + + size_t c_dim = shape.dimensions[1]; + size_t h_dim = shape.dimensions[2]; + size_t w_dim = shape.dimensions[3]; + + linear_index = indices[0] * (h_dim * w_dim * c_dim) + + indices[2] * (w_dim * c_dim) + indices[3] * (c_dim) + + indices[1]; + } else { + for (size_t i = 0; i < n; ++i) { + size_t term_stride = 1; + for (size_t j = i + 1; j < n; ++j) { + term_stride *= shape.dimensions[j]; + } + 
linear_index += indices[i] * term_stride; + } + } + return linear_index; +} + +template <typename T> +T &Tensor<T>::at(const std::vector<size_t> &indices) { + return data[get_linear_index(indices)]; +} + +template <typename T> +const T &Tensor<T>::at(const std::vector<size_t> &indices) const { + return data[get_linear_index(indices)]; +} + +#endif \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e69de29..af77d7d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB_RECURSE HEADER_FILES "${CMAKE_SOURCE_DIR}/include/*.h") +file(GLOB_RECURSE SOURCE_FILES "${CMAKE_SOURCE_DIR}/src/*.cpp") + +add_library(${ProjectName} STATIC ${SOURCE_FILES} ${HEADER_FILES}) +target_sources(${ProjectName} PRIVATE ${HEADER_FILES}) + +target_include_directories(${ProjectName} PUBLIC ${CMAKE_SOURCE_DIR}/src) \ No newline at end of file diff --git a/src/graph/graph.cpp b/src/graph/graph.cpp new file mode 100644 index 0000000..06bd9f5 --- /dev/null +++ b/src/graph/graph.cpp @@ -0,0 +1,274 @@ +#include "./graph/graph.h" + +#include <queue> +#include <stdexcept> +#include <string> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "./layer/layer.h" +#include "./tensor/tensor.h" + +Network::Network() : outputTensor_(nullptr) {} + +bool Network::addLayer(Layer& lay, const std::vector<int>& inputs, + const std::vector<int>& outputs) { + if (layers_.find(lay.getID()) == layers_.end()) { + layers_[lay.getID()] = &lay; + + for (int input_layer_id : inputs) { + auto it = layers_.find(input_layer_id); + if (it != layers_.end()) { + Layer* prev_layer = it->second; + prev_layer->addNeighbor(&lay); + } + } + + for (int output_layer_id : outputs) { + auto it = layers_.find(output_layer_id); + if (it != layers_.end()) { + Layer* next_layer = it->second; + lay.addNeighbor(next_layer); + } + } + return true; + } + return false; +} + +void Network::addEdge(Layer& layPrev, Layer& layNext) { + if (layPrev.getID() == layNext.getID()) { + throw std::invalid_argument("Cannot add edge from a layer to itself."); + } + if 
(layers_.find(layPrev.getID()) == layers_.end()) { + addLayer(layPrev, {}, {}); + } + if (layers_.find(layNext.getID()) == layers_.end()) { + addLayer(layNext, {}, {}); + } + layPrev.addNeighbor(&layNext); +} + +void Network::removeEdge(Layer& layPrev, Layer& layNext) { + if (layers_.find(layPrev.getID()) != layers_.end()) { + layPrev.removeNeighbor(&layNext); + } +} + +void Network::removeLayer(Layer& lay) { + int layer_id = lay.getID(); + + if (layers_.find(layer_id) == layers_.end()) { + return; + } + + for (auto& pair : layers_) { + pair.second->removeNeighbor(&lay); + } + + auto it = layers_.find(layer_id); + if (it != layers_.end()) { + layers_.erase(it); + } + + if (start_ == layer_id) { + start_ = -1; + } + if (end_ == layer_id) { + end_ = -1; + } +} + +int Network::getLayers() const { return static_cast(layers_.size()); } + +int Network::getEdges() const { + int count = 0; + for (const auto& layer : layers_) { + count += layer.second->neighbors_.size(); + } + return count; +} + +bool Network::isEmpty() const { return layers_.empty(); } + +bool Network::bfs_helper(int start, int vert, bool flag, + std::vector* v_ord) const { + std::unordered_map visited; + std::queue queue; + + queue.push(start); + visited[start] = true; + + while (!queue.empty()) { + int current = queue.front(); + queue.pop(); + + if (flag && current == vert) { + return true; + } + + if (v_ord != nullptr) { + v_ord->push_back(current); + } + + if (layers_.count(current) > 0) { + Layer* current_layer = layers_.at(current); + + for (Layer* neighbor : current_layer->neighbors_) { + if (visited.find(neighbor->getID()) == visited.end()) { + visited[neighbor->getID()] = true; + queue.push(neighbor->getID()); + } + } + } + } + + return false; +} + +bool Network::hasPath(Layer& layPrev, Layer& layNext) const { + if (layers_.find(layPrev.getID()) == layers_.end() || + layers_.find(layNext.getID()) == layers_.end()) { + return false; + } + return bfs_helper(layPrev.getID(), layNext.getID(), true, 
nullptr); +} + +std::vector Network::inference(int start) const { + std::vector v_ord; + bfs_helper(start, -1, false, &v_ord); + return v_ord; +} + +void Network::setInput(Layer& lay, Tensor& vec) { + if (start_ != -1) { + throw std::runtime_error("Input layer already set."); + } + if (!layers_.empty()) { + addLayer(lay); + } + inputTensor_ = vec; + start_ = lay.getID(); +} + +void Network::setOutput(Layer& lay, Tensor& vec) { + if (end_ != -1) { + throw std::runtime_error("Output layer already set."); + } + + if (layers_.find(lay.getID()) == layers_.end()) { + addLayer(lay); + } + + end_ = lay.getID(); + outputTensor_ = &vec; +} + +void Network::run() { + if (start_ == -1 || end_ == -1) { + throw std::runtime_error("Input or output layer not set."); + } + + std::vector path = inference(start_); + + bool end_in_path = false; + for (int layer_id : path) { + if (layer_id == end_) { + end_in_path = true; + break; + } + } + if (path.empty() || !end_in_path) { + throw std::runtime_error( + "No path from start to end layer found, or traversal is empty."); + } + + Tensor curr_tensor = inputTensor_; + + std::unordered_map> layer_outputs; + layer_outputs[start_] = inputTensor_; + + bool on_path = false; + + for (int layer_id : path) { + if (layers_.find(layer_id) == layers_.end()) { + throw std::runtime_error( + "Layer_id from BFS traversal not found in graph."); + } + Layer* curr_layer_ptr = layers_.at(layer_id); + if (!curr_layer_ptr) { + throw std::runtime_error("Layer with ID is null."); + } + + Tensor curr_input({0}); + + if (layer_id == start_) { + curr_input = inputTensor_; + on_path = true; + } else if (on_path) { + curr_input = curr_tensor; + } else { + continue; + } + + Tensor temp_tensor(curr_layer_ptr->get_output_shape()); + curr_layer_ptr->exec(curr_input, temp_tensor); + curr_tensor = temp_tensor; + + if (layer_id == end_) { + if (outputTensor_ == nullptr) { + throw std::runtime_error("Output tensor pointer is not set."); + } + *outputTensor_ = curr_tensor; + 
} + } +} + +std::vector Network::getLayersTypeVector() const { + std::vector layer_types_vector; + + if (start_ == -1) { + layer_types_vector.emplace_back( + "Error: Input layer (start_ ID) has not been set via setInput()."); + return layer_types_vector; + } + + if (layers_.find(start_) == layers_.end()) { + layer_types_vector.emplace_back( + "Error: Start layer with ID not found in the graph's layers map."); + return layer_types_vector; + } + + std::vector traversal_order = inference(start_); + + if (traversal_order.empty()) { + if (layers_.count(start_)) { + layer_types_vector.emplace_back( + "Warning: BFS traversal from start layer ID yielded no layers (or " + "only the start layer was expected)."); + layer_types_vector.emplace_back("Start layer type: " + + layers_.at(start_)->get_type_name()); + } else { + layer_types_vector.emplace_back( + "Error: BFS traversal from start layer ID failed, and start layer " + "itself is not in graph."); + } + return layer_types_vector; + } + + for (int layer_id : traversal_order) { + auto it = layers_.find(layer_id); + if (it == layers_.end()) { + layer_types_vector.emplace_back( + "Error: Layer ID from BFS traversal not found in graph's layers map"); + continue; + } + + Layer* current_layer = it->second; + layer_types_vector.push_back(current_layer->get_type_name()); + } + return layer_types_vector; +} + +Network::~Network() = default; \ No newline at end of file diff --git a/src/layer/ConcatenateLayer.cpp b/src/layer/ConcatenateLayer.cpp new file mode 100644 index 0000000..d910be7 --- /dev/null +++ b/src/layer/ConcatenateLayer.cpp @@ -0,0 +1,79 @@ +#include "./layer/ConcatenateLayer.h" + +#include +#include +#include +#include +#include + +#include "tensor/tensor.h" + +ConcatenateLayerMock::ConcatenateLayerMock(int id) { setID(id); } + +void ConcatenateLayerMock::configure(const std::vector& inputs_shapes, + unsigned int axis, + Shape& output_shape_ref) { + if (inputs_shapes.empty()) { + throw std::runtime_error("ConcatMock: 
Input shapes list cannot be empty."); + } + + const Shape& first_shape = inputs_shapes[0]; + if (axis >= first_shape.get_rank()) { + throw std::runtime_error( + "ConcatMock: Concatenation axis is out of bounds."); + } + + size_t rank = first_shape.get_rank(); + size_t concatenated_dim_size = 0; + + for (const auto& shape : inputs_shapes) { + if (shape.get_rank() != rank) { + throw std::runtime_error( + "ConcatMock: All input tensors must have the same rank."); + } + for (unsigned int i = 0; i < rank; ++i) { + if (i == axis) { + concatenated_dim_size += shape.dimensions[i]; + } else { + if (shape.dimensions[i] != first_shape.dimensions[i]) { + throw std::runtime_error( + "ConcatMock: Input tensor dimensions must match along " + "non-concatenation axes."); + } + } + } + } + + std::vector output_dims = first_shape.dimensions; + output_dims[axis] = concatenated_dim_size; + + input_shapes_config_ = inputs_shapes; + concatenation_axis_ = axis; + output_shape_computed_ = Shape(output_dims); + output_shape_ref = output_shape_computed_; + + configured_ = true; +} + +void ConcatenateLayerMock::exec(const Tensor&, Tensor& output) { + if (!configured_) { + throw std::logic_error("Concatenate: Not yet implemented"); + } + if (output.shape.dimensions != output_shape_computed_.dimensions) { + throw std::runtime_error( + "ACLConcatenateLayerMock: Output shape mismatch with computed shape."); + } + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + 0.6); +} + +Shape ConcatenateLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("Concatenate: Not yet implemented"); + } + return output_shape_computed_; +} + +std::string ConcatenateLayerMock::get_type_name() const { + return "ConcatenateLayerMock"; +} \ No newline at end of file diff --git a/src/layer/ConvLayer.cpp b/src/layer/ConvLayer.cpp new file mode 100644 index 0000000..fce3f71 --- /dev/null +++ b/src/layer/ConvLayer.cpp @@ -0,0 +1,122 @@ +#include "./layer/ConvLayer.h" + 
+#include +#include +#include +#include +#include + +#include "tensor/tensor.h" + +ConvolutionLayerMock::ConvolutionLayerMock(int id) { setID(id); } + +void ConvolutionLayerMock::configure(const Shape& input_s, + const Shape& weights_s, + const Shape* biases_s, Shape& output_s_ref, + const ConvPadStrideInfo& info) { + input_shape_config_ = input_s; + weights_shape_config_ = weights_s; + conv_info_ = info; + + has_biases_ = static_cast(biases_s); + + if (input_s.get_rank() < 3) { + throw std::runtime_error( + "ConvMockSimp: Input rank must be at least 3 (W, H, C)."); + } + if (weights_s.get_rank() != 4) { + throw std::runtime_error( + "ConvMockSimp: Weights rank must be 4 (KW, KH, IC, OC)."); + } + + size_t c_in; + size_t h_in; + size_t w_in; + + if (input_s.get_rank() == 4) { + c_in = input_s.dimensions[1]; + h_in = input_s.dimensions[2]; + w_in = input_s.dimensions[3]; + } else { + w_in = input_s.dimensions[0]; + h_in = input_s.dimensions[1]; + c_in = input_s.dimensions[2]; + } + + size_t kw = weights_s.dimensions[0]; + size_t kh = weights_s.dimensions[1]; + size_t ic_w = weights_s.dimensions[2]; + size_t oc_w = weights_s.dimensions[3]; + + if (c_in != ic_w) { + throw std::runtime_error( + "ConvMockSimp: Input channels mismatch with weights input channels."); + } + + if (has_biases_) { + biases_shape_config_ = *biases_s; + if (biases_shape_config_.get_rank() != 1 || + biases_shape_config_.dimensions[0] != oc_w) { + throw std::runtime_error( + "ConvMockSimp: Biases must be 1D and size must match output " + "channels"); + } + } + + size_t effective_kernel_w = kw; + size_t effective_kernel_h = kh; + + if (h_in + 2 * conv_info_.pad_y < effective_kernel_h || + w_in + 2 * conv_info_.pad_x < effective_kernel_w) { + throw std::runtime_error( + "ConvMockSimp: Kernel size is larger than padded input dimensions."); + } + + size_t w_out = ((w_in + 2 * conv_info_.pad_x - effective_kernel_w) / + conv_info_.stride_x) + + 1; + size_t h_out = ((h_in + 2 * conv_info_.pad_y - 
effective_kernel_h) / + conv_info_.stride_y) + + 1; + + std::vector output_dims = {w_out, h_out, oc_w}; + if (input_s.get_rank() > 3) { + output_dims.push_back(input_s.dimensions[0]); + } + + output_shape_computed_ = Shape(output_dims); + output_s_ref = output_shape_computed_; + configured_ = true; +} + +void ConvolutionLayerMock::exec(const Tensor& input, + Tensor& output) { + if (!configured_) { + throw std::runtime_error("ConvolutionLayerMock: Not yet implemented."); + } + if (input.shape.dimensions != input_shape_config_.dimensions) { + throw std::runtime_error( + "ConvolutionLayerMock: Input shape mismatch with configured shape."); + } + if (output.shape.dimensions != output_shape_computed_.dimensions) { + throw std::runtime_error( + "ConvolutionLayerMock: Output shape mismatch with computed shape."); + } + + double fill_value = static_cast(getID()) + 0.5; + if (has_biases_) { + fill_value += 0.01; + } + std::fill(output.data.begin(), output.data.end(), fill_value); +} + +Shape ConvolutionLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("ConvLayer: Not yet implemented"); + } + return output_shape_computed_; +} + +std::string ConvolutionLayerMock::get_type_name() const { + return "ConvolutionLayerMock"; +} \ No newline at end of file diff --git a/src/layer/ElementwiseLayer.cpp b/src/layer/ElementwiseLayer.cpp new file mode 100644 index 0000000..b9dcbcc --- /dev/null +++ b/src/layer/ElementwiseLayer.cpp @@ -0,0 +1,104 @@ +#include "./layer/ElementwiseLayer.h" + +#include +#include +#include + +#include "tensor/tensor.h" + +ElementwiseLayerMock::ElementwiseLayerMock(int id, ElementwiseOp op) + : op_type_(op) { + setID(id); +} + +void ElementwiseLayerMock::configure(const Shape& input1_shape, + const Shape& input2_shape, + Shape& output_shape_ref) { + if (input1_shape.dimensions != input2_shape.dimensions || + input1_shape.total_elements != input2_shape.total_elements) { + throw std::runtime_error( + "ElementwiseMock: Input shapes 
must match for this mock."); + } + common_shape_ = input1_shape; + output_shape_ref = common_shape_; + configured_ = true; +} + +void ElementwiseLayerMock::exec(const Tensor& input, + Tensor& output) { + if (!configured_) { + throw std::logic_error("Elementwise: Not yet implemented"); + } + if (input.shape.dimensions != common_shape_.dimensions || + input.shape.total_elements != common_shape_.total_elements) { + throw std::runtime_error( + "ElementwiseLayerMock: Input shape mismatch in exec."); + } + if (output.shape.dimensions != common_shape_.dimensions || + output.shape.total_elements != common_shape_.total_elements) { + throw std::runtime_error( + "ElementwiseLayerMock: Output shape mismatch in exec."); + } + + double fill_value_offset = 0.0; + switch (op_type_) { + case ElementwiseOp::kAdd: + fill_value_offset = 10.0; + break; + case ElementwiseOp::kMul: + fill_value_offset = 20.0; + break; + case ElementwiseOp::kMax: + fill_value_offset = 30.0; + break; + case ElementwiseOp::kMin: + fill_value_offset = -10.0; + break; + case ElementwiseOp::kSquaredDiff: + fill_value_offset = 5.0; + break; + default: + fill_value_offset = 1.0; + break; + } + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + fill_value_offset + 0.3); +} + +Shape ElementwiseLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("Elementwise: Not yet implemented"); + } + return common_shape_; +} + +std::string ElementwiseLayerMock::get_type_name() const { + std::string op_name; + switch (op_type_) { + case ElementwiseOp::kAdd: + op_name = "Add"; + break; + case ElementwiseOp::kSub: + op_name = "Sub"; + break; + case ElementwiseOp::kMul: + op_name = "Mul"; + break; + case ElementwiseOp::kDiv: + op_name = "Div"; + break; + case ElementwiseOp::kMax: + op_name = "Max"; + break; + case ElementwiseOp::kMin: + op_name = "Min"; + break; + case ElementwiseOp::kSquaredDiff: + op_name = "SquaredDiff"; + break; + default: + op_name = "UnknownOp"; + break; + } 
+ return "Elementwise" + op_name + "LayerMock"; +} \ No newline at end of file diff --git a/src/layer/MatMulLayer.cpp b/src/layer/MatMulLayer.cpp new file mode 100644 index 0000000..99486eb --- /dev/null +++ b/src/layer/MatMulLayer.cpp @@ -0,0 +1,75 @@ +#include "./layer/MatMulLayer.h" + +#include +#include +#include +#include + +#include "tensor/tensor.h" + +MatMulLayerMock::MatMulLayerMock(int id, const MatMulInfo& info) + : matmul_info_(info) { + setID(id); +} + +void MatMulLayerMock::configure(const Shape& input_x_shape, + const Shape& input_y_shape, + Shape& output_shape_ref) { + size_t m; + size_t k_x; + size_t k_y; + size_t n; + + if (input_x_shape.get_rank() != 2 || input_y_shape.get_rank() != 2) { + throw std::runtime_error( + "MatMulMock: Inputs must be 2D tensors for this mock."); + } + + m = matmul_info_.transpose_x ? input_x_shape.dimensions[1] + : input_x_shape.dimensions[0]; + k_x = matmul_info_.transpose_x ? input_x_shape.dimensions[0] + : input_x_shape.dimensions[1]; + + k_y = matmul_info_.transpose_y ? input_y_shape.dimensions[1] + : input_y_shape.dimensions[0]; + n = matmul_info_.transpose_y ? 
input_y_shape.dimensions[0] + : input_y_shape.dimensions[1]; + + if (k_x != k_y) { + throw std::runtime_error( + "MatMulMock: Inner dimensions do not match for matrix multiplication."); + } + + input_x_shape_ = input_x_shape; + input_y_shape_ = input_y_shape; + output_shape_ = Shape({m, n}); + output_shape_ref = output_shape_; + + configured_ = true; +} + +void MatMulLayerMock::exec(const Tensor& input_x, + Tensor& output) { + if (!configured_) { + throw std::runtime_error("MatMulLayerMock: Not yet implemented"); + } + if (input_x.shape.dimensions != input_x_shape_.dimensions) { + throw std::runtime_error( + "MatMulLayerMock: Input X shape mismatch in exec."); + } + if (output.shape.dimensions != output_shape_.dimensions || + output.shape.total_elements != output_shape_.total_elements) { + throw std::runtime_error("MatMulLayerMock: Output shape mismatch in exec."); + } + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + 0.1); +} + +Shape MatMulLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("MatMul: Not yet implemented"); + } + return output_shape_; +} + +std::string MatMulLayerMock::get_type_name() const { return "MatMulLayerMock"; } \ No newline at end of file diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp new file mode 100644 index 0000000..78233f1 --- /dev/null +++ b/src/layer/PoolingLayer.cpp @@ -0,0 +1,86 @@ +#include "./layer/PoolingLayer.h" + +#include +#include +#include +#include +#include + +#include "tensor/tensor.h" + +PoolingLayerMock::PoolingLayerMock(int id, const PoolingLayerInfo& info) + : pool_info_(info) { + setID(id); +} + +void PoolingLayerMock::configure(const Shape& input_shape, + Shape& output_shape_ref) { + if (input_shape.get_rank() != 4) { + throw std::runtime_error( + "PoolingMock: Input must be a 4D tensor (e.g., NCHW or NHWC) for this " + "mock."); + } + h_in_idx_ = input_shape.get_rank() - 2; + w_in_idx_ = input_shape.get_rank() - 1; + + size_t h_in = 
input_shape.dimensions[h_in_idx_]; + size_t w_in = input_shape.dimensions[w_in_idx_]; + + size_t h_out = ((h_in + 2 * pool_info_.pad_y - pool_info_.pool_size_y) / + pool_info_.stride_y) + + 1; + size_t w_out = ((w_in + 2 * pool_info_.pad_x - pool_info_.pool_size_x) / + pool_info_.stride_x) + + 1; + + input_shape_ = input_shape; + + std::vector output_dims = input_shape.dimensions; + output_dims[h_in_idx_] = h_out; + output_dims[w_in_idx_] = w_out; + output_shape_ = Shape(output_dims); + + output_shape_ref = output_shape_; + configured_ = true; +} + +void PoolingLayerMock::exec(const Tensor& input, + Tensor& output) { + if (!configured_) { + throw std::runtime_error("Pool Layer: Not yet implemented"); + } + if (input.shape.dimensions != input_shape_.dimensions) { + throw std::runtime_error("PoolingLayerMock: Input shape mismatch in exec."); + } + if (output.shape.dimensions != output_shape_.dimensions || + output.shape.total_elements != output_shape_.total_elements) { + throw std::runtime_error( + "PoolingLayerMock: Output shape mismatch in exec."); + } + + double fill_value = 0.0; + switch (pool_info_.pool_type) { + case PoolingType::kMax: + fill_value = 1.0; + break; + case PoolingType::kAvg: + fill_value = 0.5; + break; + case PoolingType::kL2: + fill_value = 0.7; + break; + } + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + fill_value + 0.2); +} + +Shape PoolingLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("Pool Layer: Not yet implemented"); + } + return output_shape_; +} + +std::string PoolingLayerMock::get_type_name() const { + return "PoolingLayerMock"; +} \ No newline at end of file diff --git a/src/layer/ReshapeLayer.cpp b/src/layer/ReshapeLayer.cpp new file mode 100644 index 0000000..1aa5512 --- /dev/null +++ b/src/layer/ReshapeLayer.cpp @@ -0,0 +1,65 @@ +#include "layer/ReshapeLayer.h" + +#include +#include +#include +#include + +#include "tensor/tensor.h" + 
+ReshapeLayerMock::ReshapeLayerMock(int id) { setID(id); } + +void ReshapeLayerMock::configure(const Shape& input_shape, + const Shape& target_output_shape, + Shape& output_shape_ref) { + if (input_shape.total_elements != target_output_shape.total_elements) { + throw std::runtime_error( + "ReshapeMock: Total number of elements must remain the same for " + "reshape."); + } + for (size_t dim_size : target_output_shape.dimensions) { + if (dim_size == 0 && target_output_shape.total_elements != 0) { + throw std::runtime_error( + "ReshapeMock: Target output shape dimension cannot be zero if " + "total elements is not zero."); + } + } + + input_shape_config_ = input_shape; + target_output_shape_config_ = target_output_shape; + output_shape_ref = target_output_shape_config_; + + configured_ = true; +} + +void ReshapeLayerMock::exec(const Tensor& input, + Tensor& output) { + if (!configured_) { + throw std::runtime_error("Reshape Layer: Not yet implemented."); + } + if (input.shape.dimensions != input_shape_config_.dimensions) { + throw std::runtime_error( + "ReshapeLayerMock: Input shape mismatch with configured shape."); + } + if (output.shape.dimensions != target_output_shape_config_.dimensions) { + throw std::runtime_error( + "ReshapeLayerMock: Output shape mismatch with target shape."); + } + if (input.data.size() != output.data.size()) { + throw std::runtime_error( + "ReshapeLayerMock: Input and output data buffer sizes mismatch."); + } + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + 0.9); +} + +Shape ReshapeLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("Reshape Layer: Not yet implemented"); + } + return target_output_shape_config_; +} + +std::string ReshapeLayerMock::get_type_name() const { + return "ReshapeLayerMock"; +} \ No newline at end of file diff --git a/src/layer/SliceLayer.cpp b/src/layer/SliceLayer.cpp new file mode 100644 index 0000000..aaaa9ea --- /dev/null +++ b/src/layer/SliceLayer.cpp @@ 
-0,0 +1,86 @@ +#include "layer/SliceLayer.h" + +#include +#include +#include +#include +#include + +#include "tensor/tensor.h" + +SliceLayerMock::SliceLayerMock(int id) { setID(id); } + +void SliceLayerMock::configure(const Shape& input_shape, + const std::vector& starts, + const std::vector& sizes, + Shape& output_shape_ref) { + size_t rank = input_shape.get_rank(); + if (starts.size() != rank || sizes.size() != rank) { + throw std::runtime_error( + "SliceMock: 'starts' and 'sizes' vectors must match input rank."); + } + + std::vector output_dims(rank); + for (size_t i = 0; i < rank; ++i) { + if (starts[i] < 0 || + static_cast(starts[i]) >= input_shape.dimensions[i]) { + throw std::runtime_error( + "SliceMock: Start coordinate out of bounds for axis."); + } + + size_t current_size; + if (sizes[i] == -1) { + current_size = input_shape.dimensions[i] - static_cast(starts[i]); + } else if (sizes[i] < 0) { + throw std::runtime_error( + "SliceMock: Size cannot be negative (unless -1 for 'to end') for " + "axis."); + } else { + current_size = static_cast(sizes[i]); + } + + if (static_cast(starts[i]) + current_size > + input_shape.dimensions[i]) { + throw std::runtime_error( + "SliceMock: Slice (start + size) exceeds dimension for axis."); + } + if (current_size == 0) { + throw std::runtime_error( + "SliceMock: Slice size cannot be zero for axis."); + } + output_dims[i] = current_size; + } + input_shape_config_ = input_shape; + slice_starts_ = starts; + slice_sizes_ = sizes; + output_shape_computed_ = Shape(output_dims); + output_shape_ref = output_shape_computed_; + + configured_ = true; +} + +void SliceLayerMock::exec(const Tensor& input, Tensor& output) { + if (!configured_) { + throw std::logic_error("Slice Layer: Not yet implemented"); + } + if (input.shape.dimensions != input_shape_config_.dimensions) { + throw std::runtime_error( + "SliceLayerMock: Input shape mismatch with configured shape."); + } + if (output.shape.dimensions != output_shape_computed_.dimensions) 
{ + throw std::runtime_error( + "SliceLayerMock: Output shape mismatch with computed shape."); + } + + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + 0.8); +} + +Shape SliceLayerMock::get_output_shape() { + if (!configured_) { + throw std::logic_error("Slice Layer: Not yet implemented"); + } + return output_shape_computed_; +} + +std::string SliceLayerMock::get_type_name() const { return "SliceLayerMock"; } \ No newline at end of file diff --git a/src/layer/SplitLayer.cpp b/src/layer/SplitLayer.cpp new file mode 100644 index 0000000..db12259 --- /dev/null +++ b/src/layer/SplitLayer.cpp @@ -0,0 +1,80 @@ +#include "layer/SplitLayer.h" + +#include +#include +#include +#include +#include + +#include "tensor/tensor.h" + +SplitLayerMock::SplitLayerMock(int id) { setID(id); } + +void SplitLayerMock::configure(const Shape& input_shape, unsigned int axis, + unsigned int num_splits, + Shape& first_output_shape_ref) { + if (num_splits == 0) { + throw std::runtime_error("SplitMock: Number of splits cannot be zero."); + } + if (axis >= input_shape.get_rank()) { + throw std::runtime_error("SplitMock: Split axis is out of bounds."); + } + if (input_shape.dimensions[axis] % num_splits != 0) { + throw std::runtime_error( + "SplitMock: Dimension size along split axis must be divisible by " + "num_splits."); + } + + input_shape_config_ = input_shape; + split_axis_ = axis; + num_splits_ = num_splits; + output_shapes_computed_.clear(); + + size_t split_dim_size = input_shape.dimensions[axis] / num_splits; + for (unsigned int i = 0; i < num_splits; ++i) { + std::vector part_dims = input_shape.dimensions; + part_dims[axis] = split_dim_size; + output_shapes_computed_.emplace_back(part_dims); + } + + if (!output_shapes_computed_.empty()) { + first_output_shape_ref = output_shapes_computed_[0]; + } + + configured_ = true; +} + +void SplitLayerMock::exec(const Tensor& input, Tensor& output) { + if (!configured_) { + throw std::logic_error("Split Layer: Not yet 
implemented"); + } + if (input.shape.dimensions != input_shape_config_.dimensions) { + throw std::runtime_error( + "SplitLayerMock: Input shape mismatch with configured shape."); + } + if (output_shapes_computed_.empty() || + output.shape.dimensions != output_shapes_computed_[0].dimensions) { + throw std::runtime_error( + "SplitLayerMock: Output shape must match the shape of the first split " + "part."); + } + + std::fill(output.data.begin(), output.data.end(), + static_cast(getID()) + 0.7); +} + +Shape SplitLayerMock::get_output_shape() { + if (!configured_ || output_shapes_computed_.empty()) { + throw std::logic_error("SplitLayerMock: Not yet implemented."); + } + return output_shapes_computed_[0]; +} + +const std::vector& SplitLayerMock::get_all_split_output_shapes() const { + if (!configured_) { + throw std::logic_error("Split Layer: Not yet implemented"); + } + return output_shapes_computed_; +} + +std::string SplitLayerMock::get_type_name() const { return "SplitLayerMock"; } \ No newline at end of file diff --git a/src/layer/layer.cpp b/src/layer/layer.cpp new file mode 100644 index 0000000..d12a40a --- /dev/null +++ b/src/layer/layer.cpp @@ -0,0 +1,15 @@ +#include "./layer/layer.h" + +#include + +void Layer::addNeighbor(Layer* neighbor) { + if (neighbor != nullptr) { + neighbors_.push_back(neighbor); + } +} + +void Layer::removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); } + +std::string Layer::getInfoString() const { + return "Layer (ID: " + std::to_string(id_) + ")"; +} \ No newline at end of file diff --git a/src/tensor/tensor.cpp b/src/tensor/tensor.cpp new file mode 100644 index 0000000..01a6bfd --- /dev/null +++ b/src/tensor/tensor.cpp @@ -0,0 +1,18 @@ +#include "./tensor/tensor.h" + +#include +#include +#include +#include + +Shape::Shape(std::vector dims) : dimensions(std::move(dims)) { + if (this->dimensions.empty()) { + total_elements = 1; + } else { + total_elements = std::accumulate(dimensions.begin(), dimensions.end(), + 
static_cast(1), + [](size_t a, size_t b) { return a * b; }); + } +} + +size_t Shape::get_rank() const { return dimensions.size(); } \ No newline at end of file diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 9dada0c..d950884 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,6 +1,12 @@ -file(GLOB_RECURSE TEST_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) -add_executable(run_tests ${TEST_SRC_FILES}) -target_link_libraries(run_tests PUBLIC - gtest_main -) +file(GLOB_RECURSE TEST_FILES ./*.cpp) + +set(TestsName "run_test") + +add_executable(${TestsName} ${TEST_FILES}) + +#target_link_libraries(${TestsName} PRIVATE ${ProjectName} gtest) +target_link_libraries(${TestsName} PRIVATE ${ProjectName} GTest::gtest GTest::gtest_main) + +enable_testing() +add_test(NAME ${TestsName} COMMAND ${TestsName}) \ No newline at end of file diff --git a/test/graph/test_graph.cpp b/test/graph/test_graph.cpp new file mode 100644 index 0000000..f0a4375 --- /dev/null +++ b/test/graph/test_graph.cpp @@ -0,0 +1,291 @@ +#include +#include +#include +#include +#include +#include + +#include "./graph/graph.h" +#include "./layer/ConvLayer.h" +#include "./layer/PoolingLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(NetworkTest, IsEmpty_InitiallyTrue) { + Network network; + + EXPECT_TRUE(network.isEmpty()); + EXPECT_EQ(network.getLayers(), 0); + EXPECT_EQ(network.getEdges(), 0); +} + +TEST(NetworkTest, AddLayer_IncrementsLayerCount) { + Network network; + ConvolutionLayerMock conv1(1); + + network.addLayer(conv1); + + EXPECT_FALSE(network.isEmpty()); + EXPECT_EQ(network.getLayers(), 1); +} + +TEST(NetworkTest, AddExistingLayer_DoesNotIncrementCount) { + Network network; + ConvolutionLayerMock conv1(1); + + network.addLayer(conv1); + network.addLayer(conv1); + + EXPECT_EQ(network.getLayers(), 1); +} + +TEST(NetworkTest, AddEdge_IncrementsEdgeCount) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + + 
network.addLayer(conv1); + network.addLayer(conv2); + network.addEdge(conv1, conv2); + + EXPECT_EQ(network.getEdges(), 1); +} + +TEST(NetworkTest, AddEdge_LayersNotInGraph_AddsThem) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + + network.addEdge(conv1, conv2); + + EXPECT_EQ(network.getLayers(), 2); + EXPECT_EQ(network.getEdges(), 1); +} + +TEST(NetworkTest, RemoveLayer_DecrementsCountsAndRemovesEdges) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + ConvolutionLayerMock conv3(3); + + network.addEdge(conv1, conv2); + network.addEdge(conv2, conv3); + network.removeLayer(conv2); + + EXPECT_EQ(network.getLayers(), 2); + EXPECT_EQ(network.getEdges(), 0); + EXPECT_FALSE(network.hasPath(conv1, conv3)); +} + +TEST(NetworkTest, HasPath_SimplePath_ReturnsTrue) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + + network.addEdge(conv1, conv2); + + EXPECT_TRUE(network.hasPath(conv1, conv2)); +} + +TEST(NetworkTest, HasPath_NoPath_ReturnsFalse) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + ConvolutionLayerMock conv3(3); + + network.addEdge(conv1, conv2); + + EXPECT_FALSE(network.hasPath(conv1, conv3)); + EXPECT_FALSE(network.hasPath(conv3, conv1)); +} + +TEST(NetworkTest, HasPath_LayerNotInGraph_ReturnsFalse) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + + network.addLayer(conv1); + + EXPECT_FALSE(network.hasPath(conv1, conv2)); +} + +TEST(NetworkTest, Inference_ReturnsCorrectOrder) { + Network network; + ConvolutionLayerMock l1(1); + ConvolutionLayerMock l2(2); + ConvolutionLayerMock l3(3); + ConvolutionLayerMock l4(4); + + network.addEdge(l1, l2); + network.addEdge(l1, l3); + network.addEdge(l2, l4); + std::vector order = network.inference(l1.getID()); + + ASSERT_EQ(order.size(), 4); + EXPECT_EQ(order[0], 1); + bool found2 = false; + bool found3 = false; + for (size_t i = 
1; i < 3; ++i) { + if (order[i] == 2) { + found2 = true; + } + if (order[i] == 3) { + found3 = true; + } + } + EXPECT_TRUE(found2); + EXPECT_TRUE(found3); +} + +TEST(NetworkTest, Run_SimpleLinearNet_Success) { + Network network; + ConvPadStrideInfo conv_info_default; + PoolingLayerInfo pool_info_default; + pool_info_default.pool_size_x = 1; + pool_info_default.pool_size_y = 1; + pool_info_default.stride_x = 1; + pool_info_default.stride_y = 1; + + ConvolutionLayerMock conv1(10); + Shape input_s1({1, 1, 3, 3}); + Shape weights_s1({3, 3, 1, 1}); + Shape output_s1_ref; + PoolingLayerMock pool2(20, pool_info_default); + Shape output_s2_ref; + Tensor net_input(input_s1); + Tensor net_output(output_s2_ref); + + conv1.configure(input_s1, weights_s1, nullptr, output_s1_ref, + conv_info_default); + pool2.configure(output_s1_ref, output_s2_ref); + network.addLayer(conv1); + network.addLayer(pool2); + network.addEdge(conv1, pool2); + std::fill(net_input.data.begin(), net_input.data.end(), 1.0); + network.setInput(conv1, net_input); + network.setOutput(pool2, net_output); + + ASSERT_NO_THROW(network.run()); +} + +TEST(NetworkTest, Run_Fail_NoInputSet) { + Network network; + ConvolutionLayerMock conv1(1); + Tensor dummy_out(Shape({1})); + + network.addLayer(conv1); + network.setOutput(conv1, dummy_out); + + EXPECT_THROW(network.run(), std::runtime_error); +} + +TEST(NetworkTest, Run_Fail_NoOutputSet) { + Network network; + ConvolutionLayerMock conv1(1); + Tensor dummy_in(Shape({1})); + + network.addLayer(conv1); + network.setInput(conv1, dummy_in); + + EXPECT_THROW(network.run(), std::runtime_error); +} + +TEST(NetworkTest, Run_Fail_NoPathFromStartToEnd) { + Network network; + ConvolutionLayerMock conv1(1); + ConvolutionLayerMock conv2(2); + ConvolutionLayerMock conv3(3); + Tensor dummy_in(Shape({1})); + Tensor dummy_out(Shape({1})); + + network.addEdge(conv1, conv2); + network.setInput(conv1, dummy_in); + network.setOutput(conv3, dummy_out); + + EXPECT_THROW(network.run(), 
std::runtime_error); +} + +TEST(NetworkTest, GetLayersTypeVector_SimpleNet) { + Network network; + ConvolutionLayerMock conv1(1); + PoolingLayerInfo pool_info_default; + PoolingLayerMock pool1(2, pool_info_default); + Tensor dummy_input(Shape({1})); + + network.addEdge(conv1, pool1); + network.setInput(conv1, dummy_input); + std::vector types = network.getLayersTypeVector(); + + ASSERT_EQ(types.size(), 2); + EXPECT_EQ(types[0], conv1.get_type_name()); + EXPECT_EQ(types[1], pool1.get_type_name()); +} + +TEST(NetworkTest, GetLayersTypeVector_NoStartSet_ReturnsError) { + Network network; + ConvolutionLayerMock conv1(1); + + network.addLayer(conv1); + std::vector types = network.getLayersTypeVector(); + + ASSERT_EQ(types.size(), 1); + EXPECT_TRUE( + types[0].find("Error: Input layer (start_ ID) has not been set") != + std::string::npos); +} + +TEST(NetworkTest, Build_DiamondShapeGraph_StructureIsCorrect) { + Network network; + ConvolutionLayerMock start_node(1); + ConvolutionLayerMock left_node(2); + ConvolutionLayerMock right_node(3); + ConvolutionLayerMock merge_node(4); + + network.addEdge(start_node, left_node); + network.addEdge(start_node, right_node); + network.addEdge(left_node, merge_node); + network.addEdge(right_node, merge_node); + + EXPECT_EQ(network.getLayers(), 4); + EXPECT_EQ(network.getEdges(), 4); + + EXPECT_TRUE(network.hasPath(start_node, merge_node)); + EXPECT_TRUE(network.hasPath(start_node, left_node)); + EXPECT_TRUE(network.hasPath(start_node, right_node)); + + EXPECT_FALSE(network.hasPath(left_node, right_node)); + EXPECT_FALSE(network.hasPath(right_node, left_node)); +} + +TEST(NetworkTest, Build_GraphWithSideInput_InferenceFindsAllNodes) { + Network network; + + ConvolutionLayerMock input_node(1); + ConvolutionLayerMock split_node(2); + ConvolutionLayerMock left_branch(3); + ConvolutionLayerMock right_branch(4); + ConvolutionLayerMock merge_node(5); + ConvolutionLayerMock side_input(6); + + network.addEdge(input_node, split_node); + 
network.addEdge(split_node, left_branch); + network.addEdge(split_node, right_branch); + network.addEdge(left_branch, merge_node); + network.addEdge(right_branch, merge_node); + network.addEdge(side_input, merge_node); + + std::vector order = network.inference(input_node.getID()); + + EXPECT_EQ(order.size(), 5); + + std::unordered_set visited_nodes(order.begin(), order.end()); + + EXPECT_EQ(visited_nodes.count(1), 1); + EXPECT_EQ(visited_nodes.count(2), 1); + EXPECT_EQ(visited_nodes.count(3), 1); + EXPECT_EQ(visited_nodes.count(4), 1); + EXPECT_EQ(visited_nodes.count(5), 1); + EXPECT_EQ(visited_nodes.count(6), 0); + + EXPECT_TRUE(network.hasPath(side_input, merge_node)); +} \ No newline at end of file diff --git a/test/layer/test_convlayermock.cpp b/test/layer/test_convlayermock.cpp new file mode 100644 index 0000000..6cc6f1c --- /dev/null +++ b/test/layer/test_convlayermock.cpp @@ -0,0 +1,156 @@ +#include + +#include "./layer/ConvLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(ConvolutionLayerMockTest, configure_success_simple_conv_no_bias) { + ConvolutionLayerMock layer(50); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 3, 16}); + Shape output_s_ref; + ConvPadStrideInfo conv_info(1, 1, 0, 0); + Shape expected_output_shape({30, 30, 16}); + + ASSERT_NO_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info)); + EXPECT_EQ(output_s_ref.dimensions, expected_output_shape.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, + expected_output_shape.dimensions); +} + +TEST(ConvolutionLayerMockTest, configure_success_conv_with_bias_and_padding) { + ConvolutionLayerMock layer(51); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 3, 16}); + Shape biases_s({16}); + Shape output_s_ref; + ConvPadStrideInfo conv_info(1, 1, 1, 1); + Shape expected_output_shape({32, 32, 16}); + + ASSERT_NO_THROW( + layer.configure(input_s, weights_s, &biases_s, output_s_ref, conv_info)); + EXPECT_EQ(output_s_ref.dimensions, 
expected_output_shape.dimensions); +} + +TEST(ConvolutionLayerMockTest, configure_success_conv_with_stride) { + ConvolutionLayerMock layer(52); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 3, 16}); + Shape output_s_ref; + ConvPadStrideInfo conv_info(2, 2, 0, 0); + Shape expected_output_shape({15, 15, 16}); + + ASSERT_NO_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info)); + EXPECT_EQ(output_s_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(ConvolutionLayerMockTest, configure_fail_input_rank_too_low) { + ConvolutionLayerMock layer(53); + Shape input_s({32, 32}); + Shape weights_s({3, 3, 3, 16}); + Shape output_s_ref; + ConvPadStrideInfo conv_info; + + EXPECT_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, configure_fail_weights_rank_not_4) { + ConvolutionLayerMock layer(54); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 3}); + Shape output_s_ref; + ConvPadStrideInfo conv_info; + + EXPECT_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, configure_fail_channel_mismatch) { + ConvolutionLayerMock layer(55); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 1, 16}); + Shape output_s_ref; + ConvPadStrideInfo conv_info; + + EXPECT_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, configure_fail_bias_rank_not_1) { + ConvolutionLayerMock layer(56); + Shape input_s({32, 32, 3}); + Shape weights_s({3, 3, 3, 16}); + Shape biases_s({16, 1}); + Shape output_s_ref; + ConvPadStrideInfo conv_info; + + EXPECT_THROW( + layer.configure(input_s, weights_s, &biases_s, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, configure_fail_bias_size_mismatch) { + ConvolutionLayerMock layer(57); + Shape input_s({32, 32, 3}); + 
Shape weights_s({3, 3, 3, 16}); + Shape biases_s({15}); + Shape output_s_ref; + ConvPadStrideInfo conv_info; + + EXPECT_THROW( + layer.configure(input_s, weights_s, &biases_s, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, + configure_fail_kernel_too_large_for_padded_input) { + ConvolutionLayerMock layer(58); + Shape input_s({3, 3, 1}); + Shape weights_s({5, 5, 1, 1}); + Shape output_s_ref; + ConvPadStrideInfo conv_info(1, 1, 0, 0); + + EXPECT_THROW( + layer.configure(input_s, weights_s, nullptr, output_s_ref, conv_info), + std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, exec_before_configure_fail) { + ConvolutionLayerMock layer(59); + Tensor input(Shape({1, 1, 1})); + Tensor output(Shape({1, 1, 1})); + + EXPECT_THROW(layer.exec(input, output), std::runtime_error); +} + +TEST(ConvolutionLayerMockTest, get_output_shape_before_configure_fail) { + ConvolutionLayerMock layer(60); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(ConvolutionLayerMockTest, exec_success_after_configure) { + ConvolutionLayerMock layer(61); + Shape input_s({3, 3, 1}); + Shape weights_s({3, 3, 1, 1}); + Shape out_ref; + ConvPadStrideInfo info; + + layer.configure(input_s, weights_s, nullptr, out_ref, info); + Tensor t_in(input_s); + Tensor t_out(out_ref); + + ASSERT_NO_THROW(layer.exec(t_in, t_out)); +} + +TEST(ConvolutionLayerMockTest, get_type_name_returns_correct_name) { + ConvolutionLayerMock layer(62); + + EXPECT_EQ(layer.get_type_name(), "ConvolutionLayerMock"); +} \ No newline at end of file diff --git a/test/layer/test_elemlayermock.cpp b/test/layer/test_elemlayermock.cpp new file mode 100644 index 0000000..a0b2f06 --- /dev/null +++ b/test/layer/test_elemlayermock.cpp @@ -0,0 +1,74 @@ +#include + +#include "./layer/ElementwiseLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(ElementwiseLayerMockTest, configure_success_matching_shapes) { + ElementwiseLayerMock layer(100, 
ElementwiseOp::kAdd); + Shape shape1({10, 20, 3}); + Shape shape2({10, 20, 3}); + Shape output_shape_ref; + + ASSERT_NO_THROW(layer.configure(shape1, shape2, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, shape1.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, shape1.dimensions); +} + +TEST(ElementwiseLayerMockTest, configure_fail_shape_mismatch) { + ElementwiseLayerMock layer(101, ElementwiseOp::kAdd); + Shape shape1({10, 20, 3}); + Shape shape2({10, 20, 4}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(shape1, shape2, output_shape_ref), + std::runtime_error); +} + +TEST(ElementwiseLayerMockTest, configure_fail_rank_mismatch) { + ElementwiseLayerMock layer(102, ElementwiseOp::kAdd); + Shape shape1({10, 20, 3}); + Shape shape2({10, 20}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(shape1, shape2, output_shape_ref), + std::runtime_error); +} + +TEST(ElementwiseLayerMockTest, exec_before_configure_fail) { + ElementwiseLayerMock layer(103, ElementwiseOp::kAdd); + Tensor input(Shape({1, 1, 1})); + Tensor output(Shape({1, 1, 1})); + + EXPECT_THROW(layer.exec(input, output), std::logic_error); +} + +TEST(ElementwiseLayerMockTest, get_output_shape_before_configure_fail) { + ElementwiseLayerMock layer(104, ElementwiseOp::kAdd); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(ElementwiseLayerMockTest, exec_success_after_configure) { + ElementwiseLayerMock layer(105, ElementwiseOp::kAdd); + Shape s({5, 5}); + Shape out_ref; + + layer.configure(s, s, out_ref); + Tensor t_in(s); + Tensor t_out(out_ref); + + ASSERT_NO_THROW(layer.exec(t_in, t_out)); +} + +TEST(ElementwiseLayerMockTest, get_type_name_returns_correct_name_for_add) { + ElementwiseLayerMock layer(106, ElementwiseOp::kAdd); + + EXPECT_EQ(layer.get_type_name(), "ElementwiseAddLayerMock"); +} + +TEST(ElementwiseLayerMockTest, get_type_name_returns_correct_name_for_mul) { + ElementwiseLayerMock layer(107, ElementwiseOp::kMul); + + 
EXPECT_EQ(layer.get_type_name(), "ElementwiseMulLayerMock"); +} \ No newline at end of file diff --git a/include/CMakeLists.txt b/test/layer/test_layer.cpp similarity index 100% rename from include/CMakeLists.txt rename to test/layer/test_layer.cpp diff --git a/test/layer/test_matmullayermock.cpp b/test/layer/test_matmullayermock.cpp new file mode 100644 index 0000000..1302368 --- /dev/null +++ b/test/layer/test_matmullayermock.cpp @@ -0,0 +1,125 @@ +#include "./layer/MatMulLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(MatMulLayerMockTest, configure_success_simple_mat_mul) { + MatMulInfo mm_info; + MatMulLayerMock layer(70, mm_info); + Shape shape_x({2, 3}); + Shape shape_y({3, 4}); + Shape output_shape_ref; + Shape expected_output_shape({2, 4}); + + ASSERT_NO_THROW(layer.configure(shape_x, shape_y, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, + expected_output_shape.dimensions); +} + +TEST(MatMulLayerMockTest, configure_success_transpose_x) { + MatMulInfo mm_info; + mm_info.transpose_x = true; + MatMulLayerMock layer(71, mm_info); + Shape shape_x({3, 2}); + Shape shape_y({3, 4}); + Shape output_shape_ref; + Shape expected_output_shape({2, 4}); + + ASSERT_NO_THROW(layer.configure(shape_x, shape_y, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(MatMulLayerMockTest, configure_success_transpose_y) { + MatMulInfo mm_info; + mm_info.transpose_y = true; + MatMulLayerMock layer(72, mm_info); + Shape shape_x({2, 3}); + Shape shape_y({4, 3}); + Shape output_shape_ref; + Shape expected_output_shape({2, 4}); + + ASSERT_NO_THROW(layer.configure(shape_x, shape_y, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(MatMulLayerMockTest, configure_success_transpose_both) { + MatMulInfo mm_info; + mm_info.transpose_x = true; + 
mm_info.transpose_y = true; + MatMulLayerMock layer(73, mm_info); + Shape shape_x({3, 2}); + Shape shape_y({4, 3}); + Shape output_shape_ref; + Shape expected_output_shape({2, 4}); + + ASSERT_NO_THROW(layer.configure(shape_x, shape_y, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(MatMulLayerMockTest, configure_fail_input_x_not_2d) { + MatMulInfo mm_info; + MatMulLayerMock layer(74, mm_info); + Shape shape_x({2, 3, 1}); + Shape shape_y({3, 4}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(shape_x, shape_y, output_shape_ref), + std::runtime_error); +} + +TEST(MatMulLayerMockTest, configure_fail_input_y_not_2d) { + MatMulInfo mm_info; + MatMulLayerMock layer(75, mm_info); + Shape shape_x({2, 3}); + Shape shape_y({3, 4, 1}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(shape_x, shape_y, output_shape_ref), + std::runtime_error); +} + +TEST(MatMulLayerMockTest, configure_fail_dimension_mismatch) { + MatMulInfo mm_info; + MatMulLayerMock layer(76, mm_info); + Shape shape_x({2, 3}); + Shape shape_y({4, 4}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(shape_x, shape_y, output_shape_ref), + std::runtime_error); +} + +TEST(MatMulLayerMockTest, exec_before_configure_fail) { + MatMulInfo mm_info; + MatMulLayerMock layer(77, mm_info); + Tensor input(Shape({1, 1})); + Tensor output(Shape({1, 1})); + + EXPECT_THROW(layer.exec(input, output), std::runtime_error); +} + +TEST(MatMulLayerMockTest, get_output_shape_before_configure_fail) { + MatMulInfo mm_info; + MatMulLayerMock layer(78, mm_info); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(MatMulLayerMockTest, exec_success_after_configure) { + MatMulInfo mm_info; + MatMulLayerMock layer(79, mm_info); + Shape shape_x({2, 3}), shape_y({3, 4}), out_ref; + + layer.configure(shape_x, shape_y, out_ref); + Tensor t_in(shape_x); + Tensor t_out(out_ref); + + ASSERT_NO_THROW(layer.exec(t_in, t_out)); +} + 
+TEST(MatMulLayerMockTest, get_type_name_returns_correct_name) { + MatMulInfo mm_info; + MatMulLayerMock layer(80, mm_info); + + EXPECT_EQ(layer.get_type_name(), "MatMulLayerMock"); +} \ No newline at end of file diff --git a/test/layer/test_poolinglayermock.cpp b/test/layer/test_poolinglayermock.cpp new file mode 100644 index 0000000..5087422 --- /dev/null +++ b/test/layer/test_poolinglayermock.cpp @@ -0,0 +1,81 @@ +#include + +#include "./layer/PoolingLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(PoolingLayerMockTest, configure_success_simple_pooling) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(90, pool_info); + Shape input_shape({1, 3, 32, 32}); + Shape output_shape_ref; + Shape expected_output_shape({1, 3, 31, 31}); + + ASSERT_NO_THROW(layer.configure(input_shape, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, + expected_output_shape.dimensions); +} + +TEST(PoolingLayerMockTest, configure_success_pooling_with_stride_and_pad) { + PoolingLayerInfo pool_info; + pool_info.pool_size_x = 3; + pool_info.pool_size_y = 3; + pool_info.stride_x = 2; + pool_info.stride_y = 2; + pool_info.pad_x = 1; + pool_info.pad_y = 1; + PoolingLayerMock layer(91, pool_info); + Shape input_shape({1, 3, 32, 32}); + Shape output_shape_ref; + Shape expected_output_shape({1, 3, 16, 16}); + + ASSERT_NO_THROW(layer.configure(input_shape, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(PoolingLayerMockTest, configure_fail_input_rank_not_4) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(92, pool_info); + Shape input_shape({1, 32, 32}); + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, output_shape_ref), + std::runtime_error); +} + +TEST(PoolingLayerMockTest, exec_before_configure_fail) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(93, pool_info); + Tensor 
input(Shape({1, 1, 1, 1})); + Tensor output(Shape({1, 1, 1, 1})); + + EXPECT_THROW(layer.exec(input, output), std::runtime_error); +} + +TEST(PoolingLayerMockTest, get_output_shape_before_configure_fail) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(94, pool_info); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(PoolingLayerMockTest, exec_success_after_configure) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(95, pool_info); + Shape input_s({1, 3, 4, 4}); + Shape out_ref; + + layer.configure(input_s, out_ref); + Tensor t_in(input_s); + Tensor t_out(out_ref); + + ASSERT_NO_THROW(layer.exec(t_in, t_out)); +} + +TEST(PoolingLayerMockTest, get_type_name_returns_correct_name) { + PoolingLayerInfo pool_info; + PoolingLayerMock layer(96, pool_info); + + EXPECT_EQ(layer.get_type_name(), "PoolingLayerMock"); +} \ No newline at end of file diff --git a/test/layer/test_slicelayermock.cpp b/test/layer/test_slicelayermock.cpp new file mode 100644 index 0000000..84fd5c7 --- /dev/null +++ b/test/layer/test_slicelayermock.cpp @@ -0,0 +1,135 @@ +#include +#include + +#include "./layer/SliceLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(SliceLayerMockTest, configure_success_simple_slice) { + SliceLayerMock layer(30); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, 10, 3}; + Shape output_shape_ref; + Shape expected_output_shape({5, 10, 3}); + + ASSERT_NO_THROW( + layer.configure(input_shape, starts, sizes, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, + expected_output_shape.dimensions); +} + +TEST(SliceLayerMockTest, configure_success_slice_to_end) { + SliceLayerMock layer(31); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, -1, 3}; + Shape output_shape_ref; + Shape expected_output_shape({5, 18, 3}); + + ASSERT_NO_THROW( + 
layer.configure(input_shape, starts, sizes, output_shape_ref)); + EXPECT_EQ(output_shape_ref.dimensions, expected_output_shape.dimensions); +} + +TEST(SliceLayerMockTest, configure_fail_starts_sizes_rank_mismatch) { + SliceLayerMock layer(32); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2}; + std::vector sizes = {5, 10, 3}; + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, configure_fail_start_out_of_bounds) { + SliceLayerMock layer(33); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 20, 0}; + std::vector sizes = {5, 1, 3}; + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, configure_fail_negative_start) { + SliceLayerMock layer(34); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, -1, 0}; + std::vector sizes = {5, 1, 3}; + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, configure_fail_negative_size_not_minus_one) { + SliceLayerMock layer(35); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, -2, 3}; + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, configure_fail_slice_exceeds_dimension) { + SliceLayerMock layer(36); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, 19, 3}; + Shape output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, configure_fail_zero_size) { + SliceLayerMock layer(37); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, 0, 3}; + Shape 
output_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, starts, sizes, output_shape_ref), + std::runtime_error); +} + +TEST(SliceLayerMockTest, exec_before_configure_fail) { + SliceLayerMock layer(38); + Tensor input(Shape({1, 1, 1})); + Tensor output(Shape({1, 1, 1})); + + EXPECT_THROW(layer.exec(input, output), std::logic_error); +} + +TEST(SliceLayerMockTest, get_output_shape_before_configure_fail) { + SliceLayerMock layer(39); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(SliceLayerMockTest, exec_success_after_configure) { + SliceLayerMock layer(40); + Shape input_shape({10, 20, 5}); + std::vector starts = {1, 2, 0}; + std::vector sizes = {5, 10, 3}; + Shape output_shape_ref; + + layer.configure(input_shape, starts, sizes, output_shape_ref); + + Tensor input_tensor(input_shape); + Tensor output_tensor(output_shape_ref); + + ASSERT_NO_THROW(layer.exec(input_tensor, output_tensor)); +} + +TEST(SliceLayerMockTest, get_type_name_returns_correct_name) { + SliceLayerMock layer(41); + + EXPECT_EQ(layer.get_type_name(), "SliceLayerMock"); +} \ No newline at end of file diff --git a/test/layer/test_splitlayermock.cpp b/test/layer/test_splitlayermock.cpp new file mode 100644 index 0000000..cfd6e86 --- /dev/null +++ b/test/layer/test_splitlayermock.cpp @@ -0,0 +1,112 @@ +#include + +#include "./layer/SplitLayer.h" +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(SplitLayerMockTest, configure_success_simple_split) { + SplitLayerMock layer(1); + Shape input_shape({10, 20, 30}); + unsigned int axis = 1; + unsigned int num_splits = 2; + Shape first_out_shape_ref; + Shape expected_first_out_shape({10, 10, 30}); + + ASSERT_NO_THROW( + layer.configure(input_shape, axis, num_splits, first_out_shape_ref)); + EXPECT_EQ(first_out_shape_ref.dimensions, + expected_first_out_shape.dimensions); + EXPECT_EQ(layer.get_output_shape().dimensions, + expected_first_out_shape.dimensions); + const auto& all_shapes = 
layer.get_all_split_output_shapes(); + ASSERT_EQ(all_shapes.size(), num_splits); + for (const auto& shape : all_shapes) { + EXPECT_EQ(shape.dimensions, expected_first_out_shape.dimensions); + } +} + +TEST(SplitLayerMockTest, configure_fail_zero_splits) { + SplitLayerMock layer(2); + Shape input_shape({10, 20, 30}); + Shape first_out_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, 1, 0, first_out_shape_ref), + std::runtime_error); +} + +TEST(SplitLayerMockTest, configure_fail_axis_out_of_bounds) { + SplitLayerMock layer(3); + Shape input_shape({10, 20, 30}); + Shape first_out_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, 3, 2, first_out_shape_ref), + std::runtime_error); +} + +TEST(SplitLayerMockTest, configure_fail_not_divisible) { + SplitLayerMock layer(4); + Shape input_shape({10, 21, 30}); + Shape first_out_shape_ref; + + EXPECT_THROW(layer.configure(input_shape, 1, 2, first_out_shape_ref), + std::runtime_error); +} + +TEST(SplitLayerMockTest, exec_before_configure_fail) { + SplitLayerMock layer(5); + Tensor input_tensor(Shape({10, 20, 30})); + Tensor output_tensor(Shape({10, 10, 30})); + + EXPECT_THROW(layer.exec(input_tensor, output_tensor), std::logic_error); +} + +TEST(SplitLayerMockTest, get_output_shape_before_configure_fail) { + SplitLayerMock layer(6); + + EXPECT_THROW(layer.get_output_shape(), std::logic_error); +} + +TEST(SplitLayerMockTest, exec_success_after_configure) { + SplitLayerMock layer(7); + Shape input_shape({10, 20, 30}); + Shape first_out_shape_ref; + + layer.configure(input_shape, 1, 2, first_out_shape_ref); + + Tensor input_tensor(input_shape); + Tensor output_tensor(first_out_shape_ref); + + ASSERT_NO_THROW(layer.exec(input_tensor, output_tensor)); +} + +TEST(SplitLayerMockTest, exec_fail_input_shape_mismatch) { + SplitLayerMock layer(8); + Shape config_input_shape({10, 20, 30}); + Shape first_out_shape_ref; + + layer.configure(config_input_shape, 1, 2, first_out_shape_ref); + + Tensor wrong_input_tensor(Shape({5, 5, 
5})); + Tensor output_tensor(first_out_shape_ref); + EXPECT_THROW(layer.exec(wrong_input_tensor, output_tensor), + std::runtime_error); +} + +TEST(SplitLayerMockTest, exec_fail_output_shape_mismatch) { + SplitLayerMock layer(9); + Shape input_shape({10, 20, 30}); + Shape first_out_shape_ref; + + layer.configure(input_shape, 1, 2, first_out_shape_ref); + + Tensor input_tensor(input_shape); + Tensor wrong_output_tensor(Shape({5, 5, 5})); + EXPECT_THROW(layer.exec(input_tensor, wrong_output_tensor), + std::runtime_error); +} + +TEST(SplitLayerMockTest, get_type_name_returns_correct_name) { + SplitLayerMock layer(10); + + EXPECT_EQ(layer.get_type_name(), "SplitLayerMock"); +} \ No newline at end of file diff --git a/test/main.cpp b/test/main.cpp index 4d820af..9a17845 100644 --- a/test/main.cpp +++ b/test/main.cpp @@ -1,6 +1,6 @@ -#include +#include "gtest/gtest.h" -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); -} +} \ No newline at end of file diff --git a/test/tensor/test_tensor.cpp b/test/tensor/test_tensor.cpp new file mode 100644 index 0000000..0cd9766 --- /dev/null +++ b/test/tensor/test_tensor.cpp @@ -0,0 +1,86 @@ +#include + +#include "./tensor/tensor.h" +#include "gtest/gtest.h" + +TEST(ShapeTest, get_rank_and_elem_checks) { + Shape s({2, 3, 4}); + + ASSERT_EQ(s.get_rank(), 3); + ASSERT_EQ(s.total_elements, 24); +} + +TEST(TensorTestDouble, can_at_to_tensor) { + Tensor t({2, 3}, Layout::kNd); + + t.at({0, 0}) = 1.0; + t.at({0, 1}) = 2.0; + t.at({0, 2}) = 3.0; + t.at({1, 0}) = 4.0; + t.at({1, 1}) = 5.0; + t.at({1, 2}) = 6.0; + + ASSERT_DOUBLE_EQ(t.at({0, 0}), 1.0); + ASSERT_DOUBLE_EQ(t.at({0, 1}), 2.0); + ASSERT_DOUBLE_EQ(t.at({0, 2}), 3.0); + ASSERT_DOUBLE_EQ(t.at({1, 0}), 4.0); + ASSERT_DOUBLE_EQ(t.at({1, 1}), 5.0); + ASSERT_DOUBLE_EQ(t.at({1, 2}), 6.0); + + const Tensor &ct = t; + ASSERT_DOUBLE_EQ(ct.at({0, 1}), 2.0); +} + +TEST(TensorTestDouble, 
can_get_linear_index2D_ND_RowMajor) { + Tensor t({2, 3}, Layout::kNd); + + ASSERT_EQ(t.get_linear_index({0, 0}), 0 * 3 + 0); + ASSERT_EQ(t.get_linear_index({0, 1}), 0 * 3 + 1); + ASSERT_EQ(t.get_linear_index({0, 2}), 0 * 3 + 2); + ASSERT_EQ(t.get_linear_index({1, 0}), 1 * 3 + 0); + ASSERT_EQ(t.get_linear_index({1, 1}), 1 * 3 + 1); + ASSERT_EQ(t.get_linear_index({1, 2}), 1 * 3 + 2); +} + +TEST(TensorTestDouble, can_get_linear_index4D_NCHW) { + Tensor t({2, 3, 4, 5}, Layout::kNchw); + + ASSERT_EQ(t.get_linear_index({0, 0, 0, 0}), 0); + ASSERT_EQ(t.get_linear_index({1, 2, 3, 4}), 119); +} + +TEST(TensorTestDouble, can_get_linear_index4D_NHWC) { + Tensor t({2, 3, 4, 5}, Layout::kNhwc); + + ASSERT_EQ(t.get_linear_index({0, 0, 0, 0}), 0); + ASSERT_EQ(t.get_linear_index({1, 2, 3, 4}), 119); + ASSERT_EQ(t.get_linear_index({0, 1, 2, 3}), 40); +} + +TEST(TensorTestDouble, can_get_linear_index4D_ND_is_RowMajor) { + Tensor t4d_nd({2, 3, 4, 5}, Layout::kNd); + + ASSERT_EQ(t4d_nd.get_linear_index({0, 0, 0, 0}), 0); + ASSERT_EQ(t4d_nd.get_linear_index({1, 2, 3, 4}), 119); +} + +TEST(TensorTestDouble, cant_get_linear_index_out_of_bounds) { + Tensor t2d({2, 3}, Layout::kNd); + Tensor t4d_nchw({2, 3, 4, 5}, Layout::kNchw); + Tensor t4d_nhwc({2, 3, 4, 5}, Layout::kNhwc); + Tensor t4d_nd({2, 3, 4, 5}, Layout::kNd); + + EXPECT_THROW(t2d.get_linear_index({2, 0}), std::out_of_range); + EXPECT_THROW(t2d.get_linear_index({0, 3}), std::out_of_range); + EXPECT_THROW(t4d_nchw.get_linear_index({2, 0, 0, 0}), std::out_of_range); + EXPECT_THROW(t4d_nhwc.get_linear_index({0, 3, 0, 0}), std::out_of_range); + EXPECT_THROW(t4d_nd.get_linear_index({0, 0, 4, 0}), std::out_of_range); +} + +TEST(TensorTestDouble, cant_get_linear_index_with_wrong_num_of_indicies) { + Tensor t2d({2, 3}, Layout::kNd); + Tensor t4d_nchw({2, 3, 4, 5}, Layout::kNchw); + + EXPECT_THROW(t2d.get_linear_index({0}), std::runtime_error); + EXPECT_THROW(t4d_nchw.get_linear_index({0, 0, 0}), std::runtime_error); +} \ No newline at 
end of file