diff --git a/studies/cuda-kernels/reducers/awkward_reduce_argmax_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_argmax_tree_reduction.py new file mode 100644 index 0000000000..011dd3b594 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_argmax_tree_reduction.py @@ -0,0 +1,90 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_argmax_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = -1; + } + } +} + +extern "C" { + __global__ void awkward_reduce_argmax_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = thread_id; + } else { + shared[idx] = -1; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int index = -1; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + index = shared[idx - stride]; + } + if (index != -1 && (shared[idx] == -1 || fromptr[index] > fromptr[shared[idx]])) { + shared[idx] = index; + } + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_argmax_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int max_index = -1; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + int index = partial[i * outlength + thread_id]; + if (index != -1 && (max_index == -1 || fromptr[index] > fromptr[max_index])) { + max_index = index; + } + } + toptr[thread_id] = max_index; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.full(outlength, -1, dtype=cp.int32) + + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.full((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), -1, dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_argmax_a = raw_module.get_function('awkward_reduce_argmax_a') + awkward_reduce_argmax_b = raw_module.get_function('awkward_reduce_argmax_b') + awkward_reduce_argmax_c = raw_module.get_function('awkward_reduce_argmax_c') + + awkward_reduce_argmax_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_argmax_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_argmax_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([0, 2, 8, -1, -1, 9])) \ No newline at end of file diff --git 
a/studies/cuda-kernels/reducers/awkward_reduce_argmin_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_argmin_tree_reduction.py new file mode 100644 index 0000000000..972dbaf4dc --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_argmin_tree_reduction.py @@ -0,0 +1,90 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_argmin_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = -1; + } + } +} + +extern "C" { + __global__ void awkward_reduce_argmin_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = thread_id; + } else { + shared[idx] = -1; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int index = -1; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + index = shared[idx - stride]; + } + if (index != -1 && (shared[idx] == -1 || fromptr[index] < fromptr[shared[idx]])) { + shared[idx] = index; + } + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_argmin_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int min_index = -1; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + int index = partial[i * outlength + thread_id]; + if (index != -1 && (min_index == -1 || fromptr[index] < fromptr[min_index])) { + min_index = index; + } + } + toptr[thread_id] = min_index; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.full(outlength, -1, dtype=cp.int32) + + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.full((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), -1, dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_argmin_a = raw_module.get_function('awkward_reduce_argmin_a') + awkward_reduce_argmin_b = raw_module.get_function('awkward_reduce_argmin_b') + awkward_reduce_argmin_c = raw_module.get_function('awkward_reduce_argmin_c') + + awkward_reduce_argmin_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_argmin_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_argmin_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([0, 1, 3, -1, -1, 9])) \ No newline at end of file diff --git 
a/studies/cuda-kernels/reducers/awkward_reduce_count_64_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_count_64_tree_reduction.py new file mode 100644 index 0000000000..0c894fee35 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_count_64_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_countnonzero_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = 1; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int countnonzero = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + countnonzero += partial[i * outlength + thread_id]; + } + toptr[thread_id] = countnonzero; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 0, 5, 6, 0, 8, 9, 0], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_countnonzero_a = raw_module.get_function('awkward_reduce_countnonzero_a') + awkward_reduce_countnonzero_b = raw_module.get_function('awkward_reduce_countnonzero_b') + awkward_reduce_countnonzero_c = raw_module.get_function('awkward_reduce_countnonzero_c') + + awkward_reduce_countnonzero_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_countnonzero_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_countnonzero_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 2, 6, 0, 0, 1])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_countnonzero_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_countnonzero_tree_reduction.py new 
file mode 100644 index 0000000000..774c3531b5 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_countnonzero_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_countnonzero_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = (fromptr[thread_id] != 0) ? 1 : 0; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int countnonzero = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + countnonzero += partial[i * outlength + thread_id]; + } + toptr[thread_id] = countnonzero; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 0, 5, 6, 0, 8, 9, 0], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_countnonzero_a = raw_module.get_function('awkward_reduce_countnonzero_a') + awkward_reduce_countnonzero_b = raw_module.get_function('awkward_reduce_countnonzero_b') + awkward_reduce_countnonzero_c = raw_module.get_function('awkward_reduce_countnonzero_c') + + awkward_reduce_countnonzero_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_countnonzero_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_countnonzero_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 2, 4, 0, 0, 0])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_max_complex_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_max_complex_tree_reduction.py new file mode 100644 index 0000000000..4d81b60a80 --- /dev/null +++ 
b/studies/cuda-kernels/reducers/awkward_reduce_max_complex_tree_reduction.py @@ -0,0 +1,98 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_max_complex_a(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id * 2] = identity; + toptr[thread_id * 2 + 1] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_max_complex_b(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + extern __shared__ float shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx * 2] = fromptr[thread_id * 2]; + shared[idx * 2 + 1] = fromptr[thread_id * 2 + 1]; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + float real = identity; + float imag = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + real = shared[(idx - stride) * 2]; + imag = shared[(idx - stride) * 2 + 1]; + } + __syncthreads(); + if (shared[idx * 2] < real || shared[idx * 2] == real && shared[idx * 2 + 1] < imag) { + shared[idx * 2] = real; + shared[idx * 2 + 1] = imag; + } + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[(blockIdx.x * outlength + parent) * 2 ] = shared[idx * 2]; + partial[(blockIdx.x * outlength + parent) * 2 + 1] = shared[idx * 2 + 1]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_max_complex_c(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + float real = identity; + float imag = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + if (real < partial[2 * (i * outlength + thread_id)] || + (partial[2 * (i * outlength + thread_id)] == real && + imag < partial[2 * (i * outlength + thread_id) + 1])) { + real = partial[(i * outlength + thread_id) * 2]; + imag = partial[(i * outlength + thread_id) * 2 + 1]; + } + } + toptr[thread_id * 2] = real; + toptr[thread_id * 2 + 1] = imag; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 0], dtype=cp.float32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +identity = cp.finfo(cp.float32).min +toptr = cp.full(outlength * 2, identity, dtype=cp.float32) +block_size = 2 +partial = cp.full((outlength * ((lenparents + block_size - 1) // block_size)), identity, dtype=cp.float32) +grid_size = (lenparents + block_size - 1) // block_size +shared_mem_size = block_size * cp.float32().nbytes + +raw_module = cp.RawModule(code=cuda_kernel) + +awkward_reduce_max_complex_a = raw_module.get_function('awkward_reduce_max_complex_a') +awkward_reduce_max_complex_b = raw_module.get_function('awkward_reduce_max_complex_b') +awkward_reduce_max_complex_c = raw_module.get_function('awkward_reduce_max_complex_c') + +awkward_reduce_max_complex_a((grid_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) +awkward_reduce_max_complex_b((grid_size,), (block_size,), 
(toptr, fromptr, parents, lenparents, outlength, identity, partial), shared_mem=shared_mem_size) +awkward_reduce_max_complex_c(((outlength + block_size - 1) // block_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + +toptr_host = toptr[0::2] + 1j * toptr[1::2] +print("tree reduction toptr:", toptr_host) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_max_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_max_tree_reduction.py new file mode 100644 index 0000000000..cecc63d415 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_max_tree_reduction.py @@ -0,0 +1,86 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_max_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = identity; + } + } +} + +extern "C" { + __global__ void awkward_reduce_max_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = identity; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = identity; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] = max(shared[idx], val); + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_max_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int maximum = identity; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + maximum = max(maximum, partial[i * outlength + thread_id]); + } + toptr[thread_id] = maximum; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, -2, -3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +identity = cp.iinfo(cp.int32).min +toptr = cp.full(outlength, identity, dtype=cp.int32) + + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.full((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), identity, dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_max_a = raw_module.get_function('awkward_reduce_max_a') + awkward_reduce_max_b = raw_module.get_function('awkward_reduce_max_b') + awkward_reduce_max_c = raw_module.get_function('awkward_reduce_max_c') + + awkward_reduce_max_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + awkward_reduce_max_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, 
partial), shared_mem=shared_mem_size) + awkward_reduce_max_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + + assert cp.array_equal(toptr, cp.array([1, -2, 9, -2147483648, -2147483648, 10])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_min_complex_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_min_complex_tree_reduction.py new file mode 100644 index 0000000000..f9cad9abab --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_min_complex_tree_reduction.py @@ -0,0 +1,98 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_min_complex_a(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id * 2] = identity; + toptr[thread_id * 2 + 1] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_min_complex_b(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + extern __shared__ float shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx * 2] = fromptr[thread_id * 2]; + shared[idx * 2 + 1] = fromptr[thread_id * 2 + 1]; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + float real = identity; + float imag = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + real = shared[(idx - stride) * 2]; + imag = shared[(idx - stride) * 2 + 1]; + } + __syncthreads(); + if (shared[idx * 2] > real || shared[idx * 2] == real && shared[idx * 2 + 1] > imag) { + shared[idx * 2] = real; + shared[idx * 2 + 1] = imag; + } + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[(blockIdx.x * outlength + parent) * 2 ] = shared[idx * 2]; + partial[(blockIdx.x * outlength + parent) * 2 + 1] = shared[idx * 2 + 1]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_min_complex_c(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + float real = identity; + float imag = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + if (real > partial[2 * (i * outlength + thread_id)] || + (partial[2 * (i * outlength + thread_id)] == real && + imag > partial[2 * (i * outlength + thread_id) + 1])) { + real = partial[(i * outlength + thread_id) * 2]; + imag = partial[(i * outlength + thread_id) * 2 + 1]; + } + } + toptr[thread_id * 2] = real; + toptr[thread_id * 2 + 1] = imag; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 0], dtype=cp.float32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +identity = 111111 +toptr = cp.full(outlength * 2, identity, dtype=cp.float32) +block_size = 2 +partial = cp.full((outlength * ((lenparents + block_size - 1) // block_size)), identity, dtype=cp.float32) +grid_size = (lenparents + block_size - 1) // block_size +shared_mem_size = block_size 
* cp.float32().nbytes + +raw_module = cp.RawModule(code=cuda_kernel) + +awkward_reduce_min_complex_a = raw_module.get_function('awkward_reduce_min_complex_a') +awkward_reduce_min_complex_b = raw_module.get_function('awkward_reduce_min_complex_b') +awkward_reduce_min_complex_c = raw_module.get_function('awkward_reduce_min_complex_c') + +awkward_reduce_min_complex_a((grid_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) +awkward_reduce_min_complex_b((grid_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength, identity, partial), shared_mem=shared_mem_size) +awkward_reduce_min_complex_c(((outlength + block_size - 1) // block_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + +toptr_host = toptr[0::2] + 1j * toptr[1::2] +print("tree reduction toptr:", toptr_host) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_min_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_min_tree_reduction.py new file mode 100644 index 0000000000..fcc3d7e133 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_min_tree_reduction.py @@ -0,0 +1,85 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_min_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = identity; + } + } +} + +extern "C" { + __global__ void awkward_reduce_min_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = identity; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = identity; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] = min(shared[idx], val); + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_min_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int identity, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int minimum = identity; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + minimum = min(minimum, partial[i * outlength + thread_id]); + } + toptr[thread_id] = minimum; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, -2, -3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +identity = cp.iinfo(cp.int32).max +toptr = cp.full(outlength, identity, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.full((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), identity, dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * 
cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_min_a = raw_module.get_function('awkward_reduce_min_a') + awkward_reduce_min_b = raw_module.get_function('awkward_reduce_min_b') + awkward_reduce_min_c = raw_module.get_function('awkward_reduce_min_c') + + awkward_reduce_min_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + awkward_reduce_min_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, partial), shared_mem=shared_mem_size) + awkward_reduce_min_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, identity, partial)) + + assert cp.array_equal(toptr, cp.array([1, -3, 4, 2147483647, 2147483647, 10])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_prod_bool_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_prod_bool_tree_reduction.py new file mode 100644 index 0000000000..a66ca772c2 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_prod_bool_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_prod_bool_a(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = true; + } + } +} + +extern "C" { + __global__ void awkward_reduce_prod_bool_b(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = 1; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 1; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] &= (val != 0); + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_prod_bool_c(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + bool prod = 1; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + prod &= (partial[i * outlength + thread_id] != 0); + } + toptr[thread_id] = prod; + } + } +} +""" + +parents = cp.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3], dtype=cp.int32) +fromptr = cp.array([1, 0, 0, 1, 1, 1, 1, 0, 0, 1], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.ones(outlength, dtype=cp.bool_) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.ones((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_prod_bool_a = raw_module.get_function('awkward_reduce_prod_bool_a') + awkward_reduce_prod_bool_b = 
raw_module.get_function('awkward_reduce_prod_bool_b') + awkward_reduce_prod_bool_c = raw_module.get_function('awkward_reduce_prod_bool_c') + + awkward_reduce_prod_bool_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_prod_bool_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_prod_bool_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([0, 1, 0, 1])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_prod_complex_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_prod_complex_tree_reduction.py new file mode 100644 index 0000000000..0a85c488ac --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_prod_complex_tree_reduction.py @@ -0,0 +1,86 @@ +import cupy as cp + +cuda_kernel = """ +#include <cuda/std/complex> + +extern "C" { + __global__ void awkward_reduce_prod_complex_a(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id * 2] = 1.0f; + toptr[thread_id * 2 + 1] = 0.0f; + } + } + + __global__ void awkward_reduce_prod_complex_b(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + extern __shared__ cuda::std::complex<float> shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = cuda::std::complex<float>(fromptr[thread_id * 2], fromptr[thread_id * 2 + 1]); + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + cuda::std::complex<float> val(1.0f, 0.0f); + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + __syncthreads(); + shared[idx] *= val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[(blockIdx.x * outlength + parent) * 2] = shared[idx].real(); + partial[(blockIdx.x * outlength + parent) * 2 + 1] = shared[idx].imag(); + } + } + } + + __global__ void awkward_reduce_prod_complex_c(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + cuda::std::complex<float> prod(1.0f, 0.0f); + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + cuda::std::complex<float> val(partial[(i * outlength + thread_id) * 2], partial[(i * outlength + thread_id) * 2 + 1]); + prod = prod * val; + } + toptr[thread_id * 2] = prod.real(); + toptr[thread_id * 2 + 1] = prod.imag(); + } + } +} +""" + +raw_module = cp.RawModule(code=cuda_kernel, options=('-I', '/usr/local/cuda-12.3/include/'),) + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 5, 5], dtype=cp.int32) +fromptr = cp.array([1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 6, 0], dtype=cp.float32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(2 * outlength, dtype=cp.float32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range(len(block_size)): + partial = cp.zeros(2 * outlength * ((lenparents + block_size[i] - 1) 
// block_size[i]), dtype=cp.float32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * 2 * cp.float32().nbytes + + + awkward_reduce_prod_complex_a = raw_module.get_function('awkward_reduce_prod_complex_a') + awkward_reduce_prod_complex_b = raw_module.get_function('awkward_reduce_prod_complex_b') + awkward_reduce_prod_complex_c = raw_module.get_function('awkward_reduce_prod_complex_c') + + awkward_reduce_prod_complex_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_prod_complex_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_prod_complex_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + print(block_size[i], toptr.get()) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_prod_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_prod_tree_reduction.py new file mode 100644 index 0000000000..38e97fe8f4 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_prod_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_prod_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 1; + } + } +} + +extern "C" { + __global__ void awkward_reduce_prod_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = 1; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 1; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] *= val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_prod_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int prod = 1; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + prod *= partial[i * outlength + thread_id]; + } + toptr[thread_id] = prod; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.ones((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_prod_a = 
raw_module.get_function('awkward_reduce_prod_a') + awkward_reduce_prod_b = raw_module.get_function('awkward_reduce_prod_b') + awkward_reduce_prod_c = raw_module.get_function('awkward_reduce_prod_c') + + awkward_reduce_prod_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_prod_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_prod_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 6, 60480, 1, 1, 10])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_atomics.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_atomics.py new file mode 100644 index 0000000000..dc2d6185ad --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_atomics.py @@ -0,0 +1,43 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_sum_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} +extern "C" { + __global__ void awkward_reduce_sum_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + + for (int i = thread_id; i < lenparents; i += stride) { + atomicAdd(&toptr[parents[i]], fromptr[i]); + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 3, 3, 3, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = 256 +grid_size = (lenparents + block_size - 1) // block_size + +raw_module = cp.RawModule(code=cuda_kernel) + +awkward_reduce_sum_a = raw_module.get_function('awkward_reduce_sum_a') +awkward_reduce_sum_b = raw_module.get_function('awkward_reduce_sum_b') + +awkward_reduce_sum_a((grid_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength)) +awkward_reduce_sum_b((grid_size,), (block_size,), (toptr, fromptr, parents, lenparents, outlength)) + +toptr_host = toptr.get() +print("atomic toptr:", toptr_host) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_bool_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_bool_tree_reduction.py new file mode 100644 index 0000000000..3f9ab1bae6 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_bool_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_sum_bool_a(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = false; + } + } +} + +extern "C" { + __global__ void awkward_reduce_sum_bool_b(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 0; + if (idx >= stride && thread_id 
< lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] |= (val != 0); + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_sum_bool_c(bool* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + bool sum = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + sum |= (partial[i * outlength + thread_id] != 0); + } + toptr[thread_id] = sum; + } + } +} +""" + +parents = cp.array([0, 0, 0, 2, 2, 3, 4, 4, 5], dtype=cp.int32) +fromptr = cp.array([1, 0, 1, 0, 0, 1, 0, 1, 1], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.bool_) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_sum_bool_a = raw_module.get_function('awkward_reduce_sum_bool_a') + awkward_reduce_sum_bool_b = raw_module.get_function('awkward_reduce_sum_bool_b') + awkward_reduce_sum_bool_c = raw_module.get_function('awkward_reduce_sum_bool_c') + + awkward_reduce_sum_bool_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_sum_bool_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_sum_bool_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 0, 0, 1, 1, 1])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_complex_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_complex_tree_reduction.py new file mode 100644 index 0000000000..5c3f20ca56 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_complex_tree_reduction.py @@ -0,0 +1,86 @@ +import cupy as cp + +cuda_kernel = """ +#include <cuda/std/complex> + +extern "C" { + __global__ void awkward_reduce_sum_complex_a(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id * 2] = 0.0f; + toptr[thread_id * 2 + 1] = 0.0f; + } + } + + __global__ void awkward_reduce_sum_complex_b(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + extern __shared__ cuda::std::complex<float> shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = cuda::std::complex<float>(fromptr[thread_id * 2], fromptr[thread_id * 2 + 1]); + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + cuda::std::complex<float> val(0.0f, 0.0f); + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = 
shared[idx - stride]; + } + __syncthreads(); + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[(blockIdx.x * outlength + parent) * 2] = shared[idx].real(); + partial[(blockIdx.x * outlength + parent) * 2 + 1] = shared[idx].imag(); + } + } + } + + __global__ void awkward_reduce_sum_complex_c(float* toptr, float* fromptr, int* parents, int lenparents, int outlength, float* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + cuda::std::complex<float> sum(0.0f, 0.0f); + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + cuda::std::complex<float> val(partial[(i * outlength + thread_id) * 2], partial[(i * outlength + thread_id) * 2 + 1]); + sum += val; + } + toptr[thread_id * 2] = sum.real(); + toptr[thread_id * 2 + 1] = sum.imag(); + } + } +} +""" + +raw_module = cp.RawModule(code=cuda_kernel, options=('-I', '/usr/local/cuda-12.3/include/'),) + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 0, 2.5677, 1.2345, 3.2367, 2.256576, 4.3456, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8, 10, 0], dtype=cp.float32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(2 * outlength, dtype=cp.float32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range(len(block_size)): + partial = cp.zeros(2 * outlength * ((lenparents + block_size[i] - 1) // block_size[i]), dtype=cp.float32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * 2 * cp.float32().nbytes + + + awkward_reduce_sum_complex_a = raw_module.get_function('awkward_reduce_sum_complex_a') + awkward_reduce_sum_complex_b = raw_module.get_function('awkward_reduce_sum_complex_b') + awkward_reduce_sum_complex_c = raw_module.get_function('awkward_reduce_sum_complex_c') + + awkward_reduce_sum_complex_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_sum_complex_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_sum_complex_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + print(block_size[i], toptr.get()) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_int32_bool_64_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_int32_bool_64_tree_reduction.py new file mode 100644 index 0000000000..142e111730 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_int32_bool_64_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_countnonzero_a(int* toptr, bool* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_b(int* toptr, bool* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = (fromptr[thread_id] != 0) ? 
1 : 0; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (int stride = 1; stride < blockDim.x; stride *= 2) { + int val = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_c(int* toptr, bool* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int countnonzero = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + countnonzero += partial[i * outlength + thread_id]; + } + toptr[thread_id] = countnonzero; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 1, 1, 0, 1, 1, 0, 1, 1, 0], dtype=cp.bool_) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_countnonzero_a = raw_module.get_function('awkward_reduce_countnonzero_a') + awkward_reduce_countnonzero_b = raw_module.get_function('awkward_reduce_countnonzero_b') + awkward_reduce_countnonzero_c = raw_module.get_function('awkward_reduce_countnonzero_c') + + awkward_reduce_countnonzero_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_countnonzero_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_countnonzero_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 2, 4, 0, 0, 0])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_int64_bool_64_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_int64_bool_64_tree_reduction.py new file mode 100644 index 0000000000..69be141d00 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_int64_bool_64_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_countnonzero_a(long long* toptr, bool* fromptr, long long* parents, long long lenparents, long long outlength, long long* partial) { + long long thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_b(long long* toptr, bool* fromptr, long long* parents, long long lenparents, long long outlength, long long* partial) { + extern __shared__ long long shared[]; + + long long idx = threadIdx.x; + long long thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = (fromptr[thread_id] != 0) ? 
1 : 0; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (long long stride = 1; stride < blockDim.x; stride *= 2) { + long long val = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + long long parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_countnonzero_c(long long* toptr, bool* fromptr, long long* parents, long long lenparents, long long outlength, long long* partial) { + long long thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + long long countnonzero = 0; + long long blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (long long i = 0; i < blocks; ++i) { + countnonzero += partial[i * outlength + thread_id]; + } + toptr[thread_id] = countnonzero; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int64) +fromptr = cp.array([1, 1, 1, 0, 1, 1, 0, 1, 1, 0], dtype=cp.bool_) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int64) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int64) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int64().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_countnonzero_a = raw_module.get_function('awkward_reduce_countnonzero_a') + awkward_reduce_countnonzero_b = raw_module.get_function('awkward_reduce_countnonzero_b') + awkward_reduce_countnonzero_c = raw_module.get_function('awkward_reduce_countnonzero_c') + + awkward_reduce_countnonzero_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_countnonzero_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_countnonzero_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 2, 4, 0, 0, 0])) \ No newline at end of file diff --git a/studies/cuda-kernels/reducers/awkward_reduce_sum_tree_reduction.py b/studies/cuda-kernels/reducers/awkward_reduce_sum_tree_reduction.py new file mode 100644 index 0000000000..db460ec574 --- /dev/null +++ b/studies/cuda-kernels/reducers/awkward_reduce_sum_tree_reduction.py @@ -0,0 +1,84 @@ +import cupy as cp + +cuda_kernel = """ +extern "C" { + __global__ void awkward_reduce_sum_a(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + toptr[thread_id] = 0; + } + } +} + +extern "C" { + __global__ void awkward_reduce_sum_b(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + extern __shared__ int shared[]; + + int idx = threadIdx.x; + int thread_id = blockIdx.x * blockDim.x + idx; + + if (thread_id < lenparents) { + shared[idx] = fromptr[thread_id]; + } else { + shared[idx] = 0; + } + __syncthreads(); + + for (int stride = 1; stride < 
blockDim.x; stride *= 2) { + int val = 0; + if (idx >= stride && thread_id < lenparents && parents[thread_id] == parents[thread_id - stride]) { + val = shared[idx - stride]; + } + shared[idx] += val; + __syncthreads(); + } + + if (thread_id < lenparents) { + int parent = parents[thread_id]; + if (idx == blockDim.x - 1 || thread_id == lenparents - 1 || parents[thread_id] != parents[thread_id + 1]) { + partial[blockIdx.x * outlength + parent] = shared[idx]; + } + } + } +} + +extern "C" { + __global__ void awkward_reduce_sum_c(int* toptr, int* fromptr, int* parents, int lenparents, int outlength, int* partial) { + int thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + if (thread_id < outlength) { + int sum = 0; + int blocks = (lenparents + blockDim.x - 1) / blockDim.x; + for (int i = 0; i < blocks; ++i) { + sum += partial[i * outlength + thread_id]; + } + toptr[thread_id] = sum; + } + } +} +""" + +parents = cp.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=cp.int32) +fromptr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.int32) +lenparents = len(parents) +outlength = int(cp.max(parents)) + 1 +toptr = cp.zeros(outlength, dtype=cp.int32) + +block_size = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] +for i in range (len(block_size)): + partial = cp.zeros((outlength * ((lenparents + block_size[i] - 1) // block_size[i])), dtype=cp.int32) + grid_size = (lenparents + block_size[i] - 1) // block_size[i] + shared_mem_size = block_size[i] * cp.int32().nbytes + + raw_module = cp.RawModule(code=cuda_kernel) + + awkward_reduce_sum_a = raw_module.get_function('awkward_reduce_sum_a') + awkward_reduce_sum_b = raw_module.get_function('awkward_reduce_sum_b') + awkward_reduce_sum_c = raw_module.get_function('awkward_reduce_sum_c') + + awkward_reduce_sum_a((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + awkward_reduce_sum_b((grid_size,), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial), shared_mem=shared_mem_size) + awkward_reduce_sum_c(((outlength + block_size[i] - 1) // block_size[i],), (block_size[i],), (toptr, fromptr, parents, lenparents, outlength, partial)) + + assert cp.array_equal(toptr, cp.array([1, 5, 39, 0, 0, 10])) \ No newline at end of file
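
Note (not part of the diff above): every tree-reduction script here computes a segmented reduction of fromptr grouped by parents. A minimal host-side sketch for cross-checking the summation case, assuming NumPy is available and reusing the inputs from awkward_reduce_sum_tree_reduction.py:

import numpy as np

parents = np.array([0, 1, 1, 2, 2, 2, 2, 2, 2, 5], dtype=np.int32)
fromptr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=np.int32)
outlength = int(parents.max()) + 1

# np.add.at scatters each fromptr[i] into expected[parents[i]], i.e. a segmented sum per parent.
expected = np.zeros(outlength, dtype=np.int32)
np.add.at(expected, parents, fromptr)

assert np.array_equal(expected, np.array([1, 5, 39, 0, 0, 10]))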