#include <string>

namespace holoscan::ops {

class PreCompiledPVAExecutor : public Operator {
 public:
  HOLOSCAN_OPERATOR_FORWARD_ARGS(PreCompiledPVAExecutor);
  PreCompiledPVAExecutor() = default;

  void setup(OperatorSpec& spec) override {
    // HOLOSCAN_LOG_INFO("PreCompiledPVAExecutor::setup");
    spec.param(allocator_, "allocator", "Allocator", "Allocator to allocate output tensor.");
    spec.input<gxf::Entity>("input");
    spec.output<gxf::Entity>("output");
  }
  void compute(InputContext& op_input, OutputContext& op_output,
               ExecutionContext& context) override {
    // HOLOSCAN_LOG_INFO("PreCompiledPVAExecutor::compute");
    auto maybe_input_message = op_input.receive<gxf::Entity>("input");
    if (!maybe_input_message.has_value()) {
      HOLOSCAN_LOG_ERROR("Failed to receive input message gxf::Entity");
      return;
    }
    auto input_tensor = maybe_input_message.value().get<holoscan::Tensor>();
    if (!input_tensor) {
      HOLOSCAN_LOG_ERROR("Failed to receive holoscan::Tensor from input message gxf::Entity");
      return;
    }

    // get handle to underlying nvidia::gxf::Allocator from std::shared_ptr<holoscan::Allocator>
    auto allocator = nvidia::gxf::Handle<nvidia::gxf::Allocator>::Create(
        fragment()->executor().context(), allocator_->gxf_cid());

    // cast Holoscan::Tensor to nvidia::gxf::Tensor to use its APIs directly
    nvidia::gxf::Tensor input_tensor_gxf{input_tensor->dl_ctx()};

    auto out_message = CreateTensorMap(
        context.context(),
        allocator.value(),
        {{"output",
          nvidia::gxf::MemoryStorageType::kDevice,
          input_tensor_gxf.shape(),
          nvidia::gxf::PrimitiveType::kUnsigned8,
          0,
          nvidia::gxf::ComputeTrivialStrides(
              input_tensor_gxf.shape(),
              nvidia::gxf::PrimitiveTypeSize(nvidia::gxf::PrimitiveType::kUnsigned8))}},
        false);

    if (!out_message) { throw std::runtime_error("failed to create out_message"); }
    const auto output_tensor = out_message.value().get<nvidia::gxf::Tensor>();
    if (!output_tensor) { throw std::runtime_error("failed to create out_tensor"); }

    uint8_t* input_tensor_data = static_cast<uint8_t*>(input_tensor->data());
    uint8_t* output_tensor_data = static_cast<uint8_t*>(output_tensor.value()->pointer());
    if (output_tensor_data == nullptr) {
      throw std::runtime_error("Failed to allocate memory for the output image");
    }

    const int32_t imageWidth{static_cast<int32_t>(input_tensor->shape()[1])};
    const int32_t imageHeight{static_cast<int32_t>(input_tensor->shape()[0])};
    const int32_t inputLinePitch{static_cast<int32_t>(input_tensor->shape()[1])};
    const int32_t outputLinePitch{static_cast<int32_t>(input_tensor->shape()[1])};

    if (!pvaOperatorTask_.isInitialized()) {
      pvaOperatorTask_.init(imageWidth, imageHeight, inputLinePitch, outputLinePitch);
    }
    pvaOperatorTask_.process(input_tensor_data, output_tensor_data);
    auto result = gxf::Entity(std::move(out_message.value()));

    op_output.emit(result, "output");
  }

 private:
  Parameter<std::shared_ptr<Allocator>> allocator_;
  PvaUnsharpMask pvaOperatorTask_;
};
}  // namespace holoscan::ops

class App : public holoscan::Application {
 public:
  void compose() override {
    using namespace holoscan;

    uint32_t max_width{1920};
    uint32_t max_height{1080};
    // block large enough for one max_width x max_height frame (3 channels x 4 bytes per channel)
    int64_t source_block_size = max_width * max_height * 3 * 4;

    std::shared_ptr<BlockMemoryPool> pva_allocator =
        make_resource<BlockMemoryPool>("allocator", 1, source_block_size, 1);

    auto precompiledpva = make_operator<ops::PreCompiledPVAExecutor>(
        "precompiledpva", Arg("allocator") = pva_allocator);

    auto source = make_operator<ops::VideoStreamReplayerOp>("replayer", from_config("replayer"));

    auto recorder = make_operator<ops::VideoStreamRecorderOp>("recorder", from_config("recorder"));
    auto visualizer1 = make_operator<ops::HolovizOp>(
        "holoviz1", from_config("holoviz"), Arg("window_title") = std::string("Original Stream"));
    auto visualizer2 =
        make_operator<ops::HolovizOp>("holoviz2",
                                      from_config("holoviz"),
                                      Arg("window_title") = std::string("Image Sharpened Stream"));

    add_flow(source, precompiledpva);
    add_flow(source, visualizer1, {{"output", "receivers"}});
    // add_flow(precompiledpva, recorder);
    add_flow(precompiledpva, visualizer2, {{"output", "receivers"}});
  }
};

int main(int argc, char** argv) {
  auto app = holoscan::make_application<App>();

  auto config_path = std::filesystem::canonical(argv[0]).parent_path();
  config_path += "/main.yaml";
  app->config(config_path);

  app->run();

  return 0;
}
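
Note: the operator above relies on a PvaUnsharpMask task object whose definition is not part of this change. Below is a minimal, hypothetical sketch of the interface it is assumed to expose, inferred only from the calls made in PreCompiledPVAExecutor::compute() (isInitialized(), init(), process()); the actual pre-compiled PVA header may differ in names, return types, and error handling.

// Hypothetical interface sketch for PvaUnsharpMask (not part of this change);
// inferred from the calls in PreCompiledPVAExecutor::compute() above.
#include <cstdint>

class PvaUnsharpMask {
 public:
  PvaUnsharpMask() = default;

  // Reports whether init() has already run; compute() uses this to
  // initialize lazily on the first frame, once the image size is known.
  bool isInitialized() const;

  // One-time setup for a fixed image size and row pitch.
  void init(int32_t imageWidth, int32_t imageHeight,
            int32_t inputLinePitch, int32_t outputLinePitch);

  // Runs the unsharp-mask task on the PVA, reading the 8-bit input image
  // and writing the sharpened result to the output buffer (device memory).
  void process(uint8_t* input, uint8_t* output);
};

The application also expects a main.yaml file next to the executable containing "replayer", "recorder", and "holoviz" sections, which App::compose() pulls in via from_config().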