Commit version 2.4.4
mencagli committed Dec 29, 2019
1 parent 08ab72d commit bc7867b
Showing 21 changed files with 705 additions and 158 deletions.
Binary file modified .DS_Store
Binary file not shown.
20 changes: 18 additions & 2 deletions wf/accumulator.hpp
@@ -74,6 +74,7 @@ class Accumulator: public ff::ff_farm
     using key_t = typename std::remove_reference<decltype(std::get<0>(tmp.getControlFields()))>::type;
     // friendships with other classes in the library
     friend class MultiPipe;
+    bool used; // true if the operator has been added/chained in a MultiPipe
     // class Accumulator_Node
     class Accumulator_Node: public ff::ff_node_t<tuple_t, result_t>
     {
@@ -233,7 +234,7 @@ class Accumulator: public ff::ff_farm
                 size_t _pardegree,
                 std::string _name,
                 closing_func_t _closing_func,
-                routing_func_t _routing_func)
+                routing_func_t _routing_func): used(false)
     {
         // check the validity of the parallelism degree
         if (_pardegree == 0) {
@@ -269,7 +270,7 @@ class Accumulator: public ff::ff_farm
                 size_t _pardegree,
                 std::string _name,
                 closing_func_t _closing_func,
-                routing_func_t _routing_func)
+                routing_func_t _routing_func): used(false)
     {
         // check the validity of the parallelism degree
         if (_pardegree == 0) {
@@ -289,6 +290,21 @@
         // when the Accumulator will be destroyed we need also to destroy the emitter, workers and collector
         ff::ff_farm::cleanup_all();
     }
+
+    /**
+     *  \brief Check whether the Accumulator has been used in a MultiPipe
+     *  \return true if the Accumulator has been added/chained to an existing MultiPipe
+     */
+    bool isUsed() const
+    {
+        return used;
+    }
+
+    /// deleted constructors/operators
+    Accumulator(const Accumulator &) = delete; // copy constructor
+    Accumulator(Accumulator &&) = delete; // move constructor
+    Accumulator &operator=(const Accumulator &) = delete; // copy assignment operator
+    Accumulator &operator=(Accumulator &&) = delete; // move assignment operator
 };

 } // namespace wf
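The new used flag pairs naturally with the deleted copy/move operations: once a MultiPipe consumes an Accumulator, no second pipeline can claim it, and no copy can fork the flag's state. A standalone sketch of the pattern (hypothetical Op/Pipe names, not WindFlow code):

#include <cassert>

class Op {
    friend class Pipe;      // the pipeline flips the flag, mirroring MultiPipe's friendship
    bool used;              // true once the operator joins a pipeline
public:
    Op(): used(false) {}
    bool isUsed() const { return used; }
    Op(const Op &) = delete;            // copying would duplicate the flag's state
    Op &operator=(const Op &) = delete;
};

class Pipe {
public:
    void add(Op &op) {
        assert(!op.isUsed() && "operator already used in another pipeline");
        op.used = true;
    }
};

int main() {
    Op op;
    Pipe p;
    p.add(op);
    assert(op.isUsed());    // a second p.add(op) would now trip the first assert
    return 0;
}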
6 changes: 3 additions & 3 deletions wf/basic.hpp
@@ -70,9 +70,9 @@ inline unsigned long current_time_nsecs()
 }

 /// utility macros
-#define DEFAULT_COLOR_VECTOR_CAPACITY 500 //< default capacity of vectors used internally by the library
-#define DEFAULT_COLOR_BATCH_SIZE_TB 1000 //< initial batch size (in no. of tuples) used by GPU operators with time-based windows
-#define DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK 256 //< default number of threads per block used by GPU operators
+#define DEFAULT_VECTOR_CAPACITY 500 //< default capacity of vectors used internally by the library
+#define DEFAULT_BATCH_SIZE_TB 1000 //< initial batch size (in no. of tuples) used by GPU operators with time-based windows
+#define DEFAULT_CUDA_NUM_THREAD_BLOCK 256 //< default number of threads per block used by GPU operators
 #define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

 // supported processing modes of the PipeGraph
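The renames above simply drop the stray COLOR infix; callers pick up the same default values under the shorter names. A hedged usage sketch (the helper function is hypothetical; the two constants and gpuErrChk are exactly the macros above, and cudaMalloc is the standard CUDA runtime call):

#include <cuda_runtime.h>
#include "wf/basic.hpp"   // for the DEFAULT_* and gpuErrChk macros above (assumed include path)

// allocate one float per tuple of the default time-based batch, then pick
// a launch width from the default threads-per-block constant
void alloc_default_batch(float **d_buf, int &threads_per_block)
{
    size_t bytes = DEFAULT_BATCH_SIZE_TB * sizeof(float);
    gpuErrChk(cudaMalloc((void **) d_buf, bytes)); // reports file/line on failure
    threads_per_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
}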
72 changes: 36 additions & 36 deletions wf/builders.hpp
@@ -755,7 +755,7 @@ class WinSeqGPU_Builder
     uint64_t slide_len = 1;
     win_type_t winType = CB;
     size_t batch_len = 1;
-    size_t n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+    size_t n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     std::string name = "anonymous_seq_gpu";
     size_t scratchpad_size = 0;

@@ -804,7 +804,7 @@ class WinSeqGPU_Builder
     *  \param _n_thread_block number of threads per block
     *  \return the object itself
     */
-    WinSeqGPU_Builder<F_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK)
+    WinSeqGPU_Builder<F_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_CUDA_NUM_THREAD_BLOCK)
     {
         batch_len = _batch_len;
         n_thread_block = _n_thread_block;
@@ -867,9 +867,9 @@ template<typename T>
 class WinFarm_Builder
 {
 private:
-    T input;
+    T &input;
     // type of the operator to be created by this builder
-    using winfarm_t = decltype(get_WF_nested_type(input));
+    using winfarm_t = std::remove_reference_t<decltype(*get_WF_nested_type(input))>;
     // type of the closing function
     using closing_func_t = std::function<void(RuntimeContext&)>;
     uint64_t win_len = 1;
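The two changed lines above switch the stored input to a reference and adjust the type computation accordingly; the same pair of changes recurs in every builder below. A self-contained sketch of the decltype idiom (assuming, as the names suggest, that the get_*_nested_type helpers return a pointer to the operator type; make_probe is hypothetical):

#include <type_traits>

struct Widget {};
Widget *make_probe();   // declaration only: decltype never evaluates the call

// dereferencing a Widget* yields an lvalue, so decltype gives Widget&;
// std::remove_reference_t strips the reference back to the plain operator type
using widget_t = std::remove_reference_t<decltype(*make_probe())>;
static_assert(std::is_same_v<widget_t, Widget>, "plain operator type recovered");

int main() { return 0; }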
@@ -882,7 +882,7 @@ class WinFarm_Builder

     // window parameters initialization (input is a Pane_Farm)
     template<typename ...Args>
-    void initWindowConf(Pane_Farm<Args...> _pf)
+    void initWindowConf(Pane_Farm<Args...> &_pf)
     {
         win_len = _pf.win_len;
         slide_len = _pf.slide_len;
@@ -891,7 +891,7 @@

     // window parameters initialization (input is a Win_MapReduce)
     template<typename ...Args>
-    void initWindowConf(Win_MapReduce<Args...> _wm)
+    void initWindowConf(Win_MapReduce<Args...> &_wm)
     {
         win_len = _wm.win_len;
         slide_len = _wm.slide_len;
@@ -900,7 +900,7 @@

     // window parameters initialization (input is a function)
     template<typename T2>
-    void initWindowConf(T2 f)
+    void initWindowConf(T2 &f)
     {
         win_len = 1;
         slide_len = 1;
@@ -913,7 +913,7 @@
     *
     *  \param _input can be either a function or an already instantiated Pane_Farm or Win_MapReduce operator.
     */
-    WinFarm_Builder(T _input): input(_input)
+    WinFarm_Builder(T &_input): input(_input)
     {
         initWindowConf(input);
     }
@@ -1042,22 +1042,22 @@ template<typename T>
 class WinFarmGPU_Builder
 {
 private:
-    T input;
+    T &input;
     // type of the operator to be created by this builder
-    using winfarm_gpu_t = decltype(get_WF_GPU_nested_type(input));
+    using winfarm_gpu_t = std::remove_reference_t<decltype(*get_WF_GPU_nested_type(input))>;
     uint64_t win_len = 1;
     uint64_t slide_len = 1;
     win_type_t winType = CB;
     size_t pardegree = 1;
     size_t batch_len = 1;
-    size_t n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+    size_t n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     std::string name = "anonymous_wf_gpu";
     size_t scratchpad_size = 0;
     opt_level_t opt_level = LEVEL2;

     // window parameters initialization (input is a Pane_Farm_GPU)
     template<typename ...Args>
-    void initWindowConf(Pane_Farm_GPU<Args...> _pf)
+    void initWindowConf(Pane_Farm_GPU<Args...> &_pf)
     {
         win_len = _pf.win_len;
         slide_len = _pf.slide_len;
@@ -1068,7 +1068,7 @@ class WinFarmGPU_Builder

     // window parameters initialization (input is a Win_MapReduce_GPU)
     template<typename ...Args>
-    void initWindowConf(Win_MapReduce_GPU<Args...> _wm)
+    void initWindowConf(Win_MapReduce_GPU<Args...> &_wm)
     {
         win_len = _wm.win_len;
         slide_len = _wm.slide_len;
@@ -1079,13 +1079,13 @@

     // window parameters initialization (input is a function)
     template<typename T2>
-    void initWindowConf(T2 f)
+    void initWindowConf(T2 &f)
     {
         win_len = 1;
         slide_len = 1;
         winType = CB;
         batch_len = 1;
-        n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+        n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     }

 public:
@@ -1094,7 +1094,7 @@
     *
     *  \param _input can be either a host/device function or an already instantiated Pane_Farm_GPU or Win_MapReduce_GPU operator.
     */
-    WinFarmGPU_Builder(T _input): input(_input) {
+    WinFarmGPU_Builder(T &_input): input(_input) {
         initWindowConf(input);
     }

@@ -1147,7 +1147,7 @@ class WinFarmGPU_Builder
     *  \param _n_thread_block number of threads per block
     *  \return the object itself
     */
-    WinFarmGPU_Builder<T>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK)
+    WinFarmGPU_Builder<T>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_CUDA_NUM_THREAD_BLOCK)
     {
         batch_len = _batch_len;
         n_thread_block = _n_thread_block;
@@ -1222,9 +1222,9 @@ template<typename T>
 class KeyFarm_Builder
 {
 private:
-    T input;
+    T &input;
     // type of the operator to be created by this builder
-    using keyfarm_t = decltype(get_KF_nested_type(input));
+    using keyfarm_t = std::remove_reference_t<decltype(*get_KF_nested_type(input))>;
     // type of the closing function
     using closing_func_t = std::function<void(RuntimeContext&)>;
     // type of the function to map the key hashcode onto an identifier starting from zero to pardegree-1
@@ -1240,7 +1240,7 @@

     // window parameters initialization (input is a Pane_Farm)
     template<typename ...Args>
-    void initWindowConf(Pane_Farm<Args...> _pf)
+    void initWindowConf(Pane_Farm<Args...> &_pf)
     {
         win_len = _pf.win_len;
         slide_len = _pf.slide_len;
@@ -1249,7 +1249,7 @@

     // window parameters initialization (input is a Win_MapReduce)
     template<typename ...Args>
-    void initWindowConf(Win_MapReduce<Args...> _wm)
+    void initWindowConf(Win_MapReduce<Args...> &_wm)
     {
         win_len = _wm.win_len;
         slide_len = _wm.slide_len;
@@ -1258,7 +1258,7 @@

     // window parameters initialization (input is a function)
     template<typename T2>
-    void initWindowConf(T2 f)
+    void initWindowConf(T2 &f)
     {
         win_len = 1;
         slide_len = 1;
@@ -1271,7 +1271,7 @@
     *
     *  \param _input can be either a function or an already instantiated Pane_Farm or Win_MapReduce operator.
     */
-    KeyFarm_Builder(T _input): input(_input)
+    KeyFarm_Builder(T &_input): input(_input)
     {
         initWindowConf(input);
     }
@@ -1398,25 +1398,25 @@ template<typename T>
 class KeyFarmGPU_Builder
 {
 private:
-    T input;
+    T &input;
     // type of the function to map the key hashcode onto an identifier starting from zero to pardegree-1
     using routing_func_t = std::function<size_t(size_t, size_t)>;
     // type of the operator to be created by this builder
-    using keyfarm_gpu_t = decltype(get_KF_GPU_nested_type(input));
+    using keyfarm_gpu_t = std::remove_reference_t<decltype(*get_KF_GPU_nested_type(input))>;
     uint64_t win_len = 1;
     uint64_t slide_len = 1;
     win_type_t winType = CB;
     size_t pardegree = 1;
     size_t batch_len = 1;
-    size_t n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+    size_t n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     std::string name = "anonymous_wf_gpu";
     size_t scratchpad_size = 0;
     routing_func_t routing_func = [](size_t k, size_t n) { return k%n; };
     opt_level_t opt_level = LEVEL2;

     // window parameters initialization (input is a Pane_Farm_GPU)
     template<typename ...Args>
-    void initWindowConf(Pane_Farm_GPU<Args...> _pf)
+    void initWindowConf(Pane_Farm_GPU<Args...> &_pf)
     {
         win_len = _pf.win_len;
         slide_len = _pf.slide_len;
@@ -1427,7 +1427,7 @@ class KeyFarmGPU_Builder

     // window parameters initialization (input is a Win_MapReduce_GPU)
     template<typename ...Args>
-    void initWindowConf(Win_MapReduce_GPU<Args...> _wm)
+    void initWindowConf(Win_MapReduce_GPU<Args...> &_wm)
     {
         win_len = _wm.win_len;
         slide_len = _wm.slide_len;
@@ -1438,13 +1438,13 @@

     // window parameters initialization (input is a function)
     template<typename T2>
-    void initWindowConf(T2 f)
+    void initWindowConf(T2 &f)
     {
         win_len = 1;
         slide_len = 1;
         winType = CB;
         batch_len = 1;
-        n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+        n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     }

 public:
@@ -1453,7 +1453,7 @@
     *
     *  \param _input can be either a host/device function or an already instantiated Pane_Farm_GPU or Win_MapReduce_GPU operator.
     */
-    KeyFarmGPU_Builder(T _input): input(_input) {
+    KeyFarmGPU_Builder(T &_input): input(_input) {
         initWindowConf(input);
     }

@@ -1506,7 +1506,7 @@ class KeyFarmGPU_Builder
     *  \param _n_thread_block number of threads per block
     *  \return the object itself
     */
-    KeyFarmGPU_Builder<T>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK)
+    KeyFarmGPU_Builder<T>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_CUDA_NUM_THREAD_BLOCK)
     {
         batch_len = _batch_len;
         n_thread_block = _n_thread_block;
@@ -1751,7 +1751,7 @@ class PaneFarmGPU_Builder
     size_t plq_degree = 1;
     size_t wlq_degree = 1;
     size_t batch_len = 1;
-    size_t n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+    size_t n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     std::string name = "anonymous_pf_gpu";
     size_t scratchpad_size = 0;
     opt_level_t opt_level = LEVEL0;
@@ -1818,7 +1818,7 @@ class PaneFarmGPU_Builder
     *  \param _n_thread_block number of threads per block
     *  \return the object itself
     */
-    PaneFarmGPU_Builder<F_t, G_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK)
+    PaneFarmGPU_Builder<F_t, G_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_CUDA_NUM_THREAD_BLOCK)
     {
         batch_len = _batch_len;
         n_thread_block = _n_thread_block;
@@ -2075,7 +2075,7 @@ class WinMapReduceGPU_Builder
     size_t map_degree = 2;
     size_t reduce_degree = 1;
     size_t batch_len = 1;
-    size_t n_thread_block = DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK;
+    size_t n_thread_block = DEFAULT_CUDA_NUM_THREAD_BLOCK;
     std::string name = "anonymous_wmw_gpu";
     size_t scratchpad_size = 0;
     opt_level_t opt_level = LEVEL0;
@@ -2142,7 +2142,7 @@ class WinMapReduceGPU_Builder
     *  \param _n_thread_block number of threads per block
     *  \return the object itself
     */
-    WinMapReduceGPU_Builder<F_t, G_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_COLOR_CUDA_NUM_THREAD_BLOCK)
+    WinMapReduceGPU_Builder<F_t, G_t>& withBatch(size_t _batch_len, size_t _n_thread_block=DEFAULT_CUDA_NUM_THREAD_BLOCK)
     {
         batch_len = _batch_len;
         n_thread_block = _n_thread_block;
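Taken together, the builder changes in this file mean the wrapped operator or functor must now outlive its builder; in exchange, a large Pane_Farm or Win_MapReduce is no longer copied into the builder, and later mutations of the input remain visible to it. A standalone sketch of the lifetime rule (hypothetical RefBuilder/Operator names, not WindFlow's API):

#include <iostream>

struct Operator { int win_len = 1; };

template<typename T>
class RefBuilder {
    T &input;                                  // stored by reference, as in this commit
public:
    RefBuilder(T &_input): input(_input) {}
    int winLen() const { return input.win_len; }
};

int main() {
    Operator op;
    RefBuilder<Operator> b(op);                // fine: op outlives b
    op.win_len = 64;
    std::cout << b.winLen() << std::endl;      // prints 64: the builder sees updates
    // RefBuilder<Operator> bad(Operator{});   // rejected at compile time: a non-const
    // lvalue reference cannot bind to a temporary, so a dangling builder is prevented
    return 0;
}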
