Fix incorrect outputs and improve performance of commonMemSetLargePattern

Change the implementation of commonMemSetLargePattern to use the largest
pattern word size supported by the backend into which the pattern can be
divided. That is, use 4-byte words if the pattern size is a multiple of 4,
2-byte words for even sizes and 1-byte words for odd sizes.
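
For illustration, the selection described above boils down to a modulo check. The standalone helper below is hypothetical (it is not part of this patch, and assumes C++17); it only restates that rule so it can be verified at compile time.

#include <cstdint>

// Hypothetical helper restating the rule above: prefer the widest word
// (4 bytes) that evenly divides the pattern, falling back to 2 or 1 bytes.
constexpr uint32_t chooseWordSize(uint32_t PatternSize) {
  return PatternSize % 4u == 0u ? 4u : PatternSize % 2u == 0u ? 2u : 1u;
}

static_assert(chooseWordSize(8) == 4); // multiple of 4 -> 32-bit fills
static_assert(chooseWordSize(6) == 2); // even, not multiple of 4 -> 16-bit
static_assert(chooseWordSize(5) == 1); // odd -> 8-bit fills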

Keep the idea of filling the entire destination region with the first word
and only starting the strided fills from the second word, but implement it
correctly. The previous implementation produced incorrect results for any
pattern size which wasn't a multiple of 4: its initial 4-byte fill repeats the
first four pattern bytes with a 4-byte period that does not line up with the
pattern, and the subsequent 1-byte strided fills never overwrite the first
four bytes of each later pattern repetition.

Add a new optimisation skipping the strided fills completely if the pattern
is equal to the first word repeated throughout. This is most commonly the
case for a pattern of all zeros, but other cases are possible.
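
To make the intended behaviour easy to verify on the host, the sketch below models the strategy from this commit in plain C++. All names are invented for illustration; this is not the adapter code, and it assumes the destination size is a multiple of the pattern size. It fills the whole buffer with the first word, skips the strided passes when every word equals the first one, and otherwise overwrites each remaining word with one strided pass; the check in main() confirms the result equals the pattern repeated end to end.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Host-side model of the fill strategy. Word is the chosen backend word type
// (uint8_t, uint16_t or uint32_t); Dst.size() must be a multiple of
// PatternSize, and PatternSize a multiple of sizeof(Word).
template <typename Word>
void fillWithPattern(std::vector<uint8_t> &Dst, const uint8_t *Pattern,
                     size_t PatternSize) {
  const size_t WordSize = sizeof(Word);
  const size_t NumWords = PatternSize / WordSize;
  std::vector<Word> Words(NumWords);
  std::memcpy(Words.data(), Pattern, PatternSize);

  // Continuous fill of the whole region with the first word
  // (models cuMemsetD{8,16,32}Async).
  for (size_t i = 0; i + WordSize <= Dst.size(); i += WordSize)
    std::memcpy(&Dst[i], &Words[0], WordSize);

  // Skip the strided passes entirely if the pattern is the first word repeated.
  bool Uniform = true;
  for (size_t w = 1; w < NumWords; ++w)
    Uniform = Uniform && (Words[w] == Words[0]);
  if (Uniform)
    return;

  // One strided pass per remaining word, with a stride of PatternSize bytes
  // (models cuMemsetD2D{8,16,32}Async with a width of one word).
  for (size_t w = 1; w < NumWords; ++w)
    for (size_t i = w * WordSize; i < Dst.size(); i += PatternSize)
      std::memcpy(&Dst[i], &Words[w], WordSize);
}

int main() {
  const uint8_t Pattern[6] = {1, 2, 3, 4, 5, 6}; // even size -> 2-byte words
  std::vector<uint8_t> Buf(4 * sizeof(Pattern), 0);
  fillWithPattern<uint16_t>(Buf, Pattern, sizeof(Pattern));
  for (size_t i = 0; i < Buf.size(); ++i)
    assert(Buf[i] == Pattern[i % sizeof(Pattern)]);
  return 0;
}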
rafbiels committed Oct 31, 2024
1 parent a9c7aef commit 5e99670
75 changes: 50 additions & 25 deletions source/adapters/cuda/enqueue.cpp
@@ -953,35 +953,60 @@ UR_APIEXPORT ur_result_t UR_APICALL urEnqueueMemBufferCopyRect(

 // CUDA has no memset functions that allow setting values more than 4 bytes. UR
 // API lets you pass an arbitrary "pattern" to the buffer fill, which can be
-// more than 4 bytes. We must break up the pattern into 1 byte values, and set
-// the buffer using multiple strided calls. The first 4 patterns are set using
-// cuMemsetD32Async then all subsequent 1 byte patterns are set using
-// cuMemset2DAsync which is called for each pattern.
+// more than 4 bytes. We must break up the pattern into 1, 2 or 4-byte values
+// and set the buffer using multiple strided calls.
 ur_result_t commonMemSetLargePattern(CUstream Stream, uint32_t PatternSize,
                                      size_t Size, const void *pPattern,
                                      CUdeviceptr Ptr) {
-  // Calculate the number of patterns, stride, number of times the pattern
-  // needs to be applied, and the number of times the first 32 bit pattern
-  // needs to be applied.
-  auto NumberOfSteps = PatternSize / sizeof(uint8_t);
-  auto Pitch = NumberOfSteps * sizeof(uint8_t);
-  auto Height = Size / NumberOfSteps;
-  auto Count32 = Size / sizeof(uint32_t);
-
-  // Get 4-byte chunk of the pattern and call cuMemsetD32Async
-  auto Value = *(static_cast<const uint32_t *>(pPattern));
-  UR_CHECK_ERROR(cuMemsetD32Async(Ptr, Value, Count32, Stream));
-  for (auto step = 4u; step < NumberOfSteps; ++step) {
-    // take 1 byte of the pattern
-    Value = *(static_cast<const uint8_t *>(pPattern) + step);
-
-    // offset the pointer to the part of the buffer we want to write to
-    auto OffsetPtr = Ptr + (step * sizeof(uint8_t));
-
-    // set all of the pattern chunks
-    UR_CHECK_ERROR(cuMemsetD2D8Async(OffsetPtr, Pitch, Value, sizeof(uint8_t),
-                                     Height, Stream));
-  }
+  // Find the largest supported word size into which the pattern can be divided
+  auto BackendWordSize =
+      PatternSize % 4u == 0u ? 4u : PatternSize % 2u == 0u ? 2u : 1u;
+
+  // Calculate the number of words in the pattern, the stride, and the number
+  // of times the pattern needs to be applied
+  auto NumberOfSteps = PatternSize / BackendWordSize;
+  auto Pitch = NumberOfSteps * BackendWordSize;
+  auto Height = Size / PatternSize;
+
+  // Same implementation works for any pattern word type (uint8_t, uint16_t,
+  // uint32_t)
+  auto memsetImpl = [BackendWordSize, NumberOfSteps, Pitch, Height, Size, Ptr,
+                     &Stream](const auto *pPatternWords,
+                              auto &&continuousMemset, auto &&stridedMemset) {
+    // If the pattern is 1 word or the first word is repeated throughout, a
+    // fast continuous fill can be used without the need for slower strided
+    // fills
+    bool UseOnlyFirstValue{true};
+    for (auto Step{1u}; (Step < NumberOfSteps) && UseOnlyFirstValue; ++Step) {
+      if (*(pPatternWords + Step) != *pPatternWords) {
+        UseOnlyFirstValue = false;
+      }
+    }
+    auto OptimizedNumberOfSteps{UseOnlyFirstValue ? 1u : NumberOfSteps};
+
+    // Fill the pattern in steps of BackendWordSize bytes. Use a continuous
+    // fill in the first step because it's faster than a strided fill. Then,
+    // overwrite the other values in subsequent steps.
+    for (auto Step{0u}; Step < OptimizedNumberOfSteps; ++Step) {
+      if (Step == 0) {
+        UR_CHECK_ERROR(continuousMemset(Ptr, *(pPatternWords),
+                                        Size / BackendWordSize, Stream));
+      } else {
+        UR_CHECK_ERROR(stridedMemset(Ptr + Step * BackendWordSize, Pitch,
+                                     *(pPatternWords + Step), 1u, Height,
+                                     Stream));
+      }
+    }
+  };
+
+  // Apply the implementation to the chosen pattern word type
+  switch (BackendWordSize) {
+  case 4u: {
+    memsetImpl(static_cast<const uint32_t *>(pPattern), cuMemsetD32Async,
+               cuMemsetD2D32Async);
+    break;
+  }
+  case 2u: {
+    memsetImpl(static_cast<const uint16_t *>(pPattern), cuMemsetD16Async,
+               cuMemsetD2D16Async);
+    break;
+  }
+  default: {
+    memsetImpl(static_cast<const uint8_t *>(pPattern), cuMemsetD8Async,
+               cuMemsetD2D8Async);
+    break;
+  }
+  }

   return UR_RESULT_SUCCESS;
 }
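
For a concrete sense of what the dispatch above issues, consider a 6-byte pattern: BackendWordSize is 2, so the first word is written with cuMemsetD16Async and each remaining word becomes a one-element-wide cuMemsetD2D16Async column with a 6-byte pitch. The helper below is a hypothetical hand-expansion of that case (the function name is invented, and it assumes Size is a multiple of the pattern size); it is not code from this patch.

#include <cuda.h>

#include <cstdint>

// Hypothetical expansion for PatternSize == 6: BackendWordSize == 2,
// NumberOfSteps == 3, Pitch == 6 bytes, Height == Size / 6 rows. Word 0 fills
// the whole region; words 1 and 2 are fixed up with one-column strided fills.
CUresult fillSixBytePattern(CUdeviceptr Ptr, size_t Size, const void *pPattern,
                            CUstream Stream) {
  const uint16_t *Words = static_cast<const uint16_t *>(pPattern);
  CUresult Err = cuMemsetD16Async(Ptr, Words[0], Size / 2, Stream);
  if (Err != CUDA_SUCCESS)
    return Err;
  Err = cuMemsetD2D16Async(Ptr + 2, /*dstPitch=*/6, Words[1], /*Width=*/1,
                           /*Height=*/Size / 6, Stream);
  if (Err != CUDA_SUCCESS)
    return Err;
  return cuMemsetD2D16Async(Ptr + 4, /*dstPitch=*/6, Words[2], /*Width=*/1,
                            /*Height=*/Size / 6, Stream);
}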

