-
Notifications
You must be signed in to change notification settings - Fork 97
Cudastf #794
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Cudastf #794
Changes from 1 commit
d6dc01d
245b20f
9b35ec8
7d298d4
154b3f9
c8ef988
52b18c9
d726b10
5e7576c
1373699
92e7204
a608f3f
b062577
bbf9abc
3e831ea
702fe79
5bfe21e
f407256
221599c
7a5bb6c
0c2432f
3ae267b
6a75794
f1facca
0199e75
39b16f4
9b7c4b0
f9e09f1
6c9a791
a1efd1c
6437eab
bbb9aae
e13c9b6
7244399
66f6850
89e2a43
973886b
92885e7
8607840
14e0985
92e04d5
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
- Loading branch information
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -73,7 +73,12 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) | |
{ | ||
MATX_ENTER_HANDLER(); | ||
using complex = cuda::std::complex<float>; | ||
#if 0 | ||
cudaExecutor exec{}; | ||
#else | ||
stfExecutor exec{}; | ||
auto ctx = exec.getCtx(); | ||
#endif | ||
|
||
index_t signal_size = 1ULL << 16; | ||
index_t filter_size = 16; | ||
|
@@ -117,7 +122,11 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) | |
// Perform the FFT in-place on both signal and filter | ||
for (int i = 0; i < iterations; i++) { | ||
if (i == 1) { | ||
#if 0 | ||
cudaEventRecord(start, stream); | ||
#else | ||
cudaEventRecord(start, ctx.task_fence()); | ||
#endif | ||
} | ||
(sig_freq = fft(sig_time, filtered_size)).run(exec); | ||
(filt_freq = fft(filt_time, filtered_size)).run(exec); | ||
|
@@ -129,18 +138,30 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) | |
|
||
} | ||
|
||
#if 0 | ||
cudaEventRecord(stop, stream); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Eventually we should mask these events behind the executor as well so the timing is the same regardless of the executor. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes this makes it look like the code is very different for both executors but that timing is the sole reason especially if finalize is moved to the dtor |
||
#else | ||
cudaEventRecord(stop, ctx.task_fence()); | ||
#endif | ||
exec.sync(); | ||
cudaEventElapsedTime(&separate_ms, start, stop); | ||
|
||
for (int i = 0; i < iterations; i++) { | ||
if (i == 1) { | ||
cudaEventRecord(start, stream); | ||
#if 0 | ||
cudaEventRecord(start, stream); | ||
#else | ||
cudaEventRecord(start, ctx.task_fence()); | ||
#endif | ||
} | ||
(sig_freq = ifft(fft(sig_time, filtered_size) * fft(filt_time, filtered_size))).run(exec); | ||
} | ||
|
||
|
||
#if 0 | ||
cudaEventRecord(stop, stream); | ||
#else | ||
cudaEventRecord(stop, ctx.task_fence()); | ||
#endif | ||
exec.sync(); | ||
cudaEventElapsedTime(&fused_ms, start, stop); | ||
|
||
|
@@ -153,7 +174,11 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) | |
(time_out = conv1d(sig_time, filt1, matxConvCorrMode_t::MATX_C_MODE_FULL)).run(exec); | ||
|
||
exec.sync(); | ||
|
||
|
||
#if 1 | ||
ctx.finalize(); | ||
#endif | ||
|
||
// Compare signals | ||
for (index_t b = 0; b < batches; b++) { | ||
for (index_t i = 0; i < filtered_size; i++) { | ||
|
@@ -172,4 +197,4 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) | |
|
||
CUDA_CHECK_LAST_ERROR(); | ||
MATX_EXIT_HANDLER(); | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -63,10 +63,12 @@ template <typename T> constexpr bool is_matx_set_op(); | |
stfExecutor(cudaStream_t stream) : stream_(stream) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. What does a stream do here? I thought STF had its own internal streams? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @cliffburdick In STF you can create nested/localized contexts & streams from existing (non-STF created) streams. This allows STF mechanisms to be correctly synchronized within the existing stream ecosystem. @caugonnet correct me if I am wrong. |
||
cuda::experimental::stf::async_resources_handle handle; | ||
ctx_ = cuda::experimental::stf::stream_ctx(stream, handle); | ||
//ctx_ = cuda::experimental::stf::graph_ctx(stream, handle); | ||
} | ||
stfExecutor(int stream) : stream_(reinterpret_cast<cudaStream_t>(stream)) { | ||
cuda::experimental::stf::async_resources_handle handle; | ||
ctx_ = cuda::experimental::stf::stream_ctx(reinterpret_cast<cudaStream_t>(stream), handle); | ||
//ctx_ = cuda::experimental::stf::graph_ctx(reinterpret_cast<cudaStream_t>(stream), handle); | ||
} | ||
|
||
/** | ||
|
@@ -75,6 +77,7 @@ template <typename T> constexpr bool is_matx_set_op(); | |
*/ | ||
stfExecutor() : stream_(0) { | ||
ctx_ = cuda::experimental::stf::stream_ctx(); | ||
//ctx_ = cuda::experimental::stf::graph_ctx(); | ||
} | ||
|
||
/** | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -72,9 +72,35 @@ namespace detail { | |
return tmp_out_(indices...); | ||
}; | ||
|
||
template <typename Task> | ||
__MATX_INLINE__ void apply_dep_to_task(Task &&task, int perm=1) const noexcept { | ||
/* Albert -- Scenario where the all() operator is on the RHS and sum has already | ||
run previously. So we make tmp_out have a read permission as it will be read from */ | ||
tmp_out_.apply_dep_to_task(std::forward<Task>(task), 1); | ||
} | ||
|
||
template <typename Out, typename Executor> | ||
void Exec(Out &&out, Executor &&ex) const { | ||
all_impl(cuda::std::get<0>(out), a_, ex); | ||
auto output = cuda::std::get<0>(out); | ||
// stfexecutor case | ||
if constexpr (!is_cuda_executor_v<Executor>) { | ||
auto ctx = ex.getCtx(); | ||
auto tsk = ctx.task(); | ||
tsk.set_symbol("all_task"); | ||
|
||
output.PreRun(out_dims_, std::forward<Executor>(ex)); | ||
output.apply_dep_to_task(tsk, 0); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why isn't |
||
a_.apply_dep_to_task(tsk, 1); | ||
|
||
tsk->*[&](cudaStream_t s) { | ||
auto exec = cudaExecutor(s); | ||
all_impl(output, a_, exec); | ||
}; | ||
} | ||
// cudaExecutor case | ||
else if constexpr (is_cuda_executor_v<Executor>) { | ||
all_impl(output, a_, ex); | ||
} | ||
} | ||
|
||
static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank() | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -58,8 +58,8 @@ namespace matx | |
* cuda Stream to execute on | ||
* | ||
*/ | ||
template <typename XType, typename AType, typename BType> | ||
__MATX_INLINE__ void cgsolve_impl(XType X, AType A, BType B, double tol=1e-6, int max_iters=4, cudaStream_t stream=0) | ||
template <typename XType, typename AType, typename BType, typename Executor> | ||
__MATX_INLINE__ void cgsolve_impl(XType X, AType A, BType B, Executor &&exec, double tol=1e-6, int max_iters=4, cudaStream_t stream=0) | ||
{ | ||
using value_type = typename XType::value_type; | ||
const int VRANK = XType::Rank(); | ||
|
@@ -120,15 +120,19 @@ namespace matx | |
auto pApc = clone<VRANK>(pAp, clone_shape); | ||
|
||
// A*X | ||
(Ap = matvec(A, X)).run(stream); | ||
//(Ap = matvec(A, X)).run(stream); | ||
(Ap = matvec(A, X)).run(exec); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. is that the same to call run(exec) and run(stream) when we have a "classic" executor ? (won't it trigger much more work ?) |
||
// r0 = B - A*X | ||
// p = r0 | ||
(p = r0 = B - Ap).run(stream); | ||
//(p = r0 = B - Ap).run(stream); | ||
(p = r0 = B - Ap).run(exec); | ||
|
||
(r0r0 = sum(r0*r0)).run(stream); | ||
//(r0r0 = sum(r0*r0)).run(stream); | ||
(r0r0 = sum(r0*r0)).run(exec); | ||
|
||
if(tol>0.0f) { | ||
(converged = matx::all(as_int(sqrt(r0r0) < tol))).run(stream); | ||
//(converged = matx::all(as_int(sqrt(r0r0) < tol))).run(stream); | ||
(converged = matx::all(as_int(sqrt(r0r0) < tol))).run(exec); | ||
|
||
cudaEventRecord(event, stream); | ||
cudaStreamWaitEvent(d2h, event); | ||
|
@@ -137,10 +141,12 @@ namespace matx | |
int i; | ||
for (i = 0 ; i < max_iters; i++) { | ||
// Ap = matvec(A, p) | ||
(Ap = matvec(A, p)).run(stream); | ||
//(Ap = matvec(A, p)).run(stream); | ||
(Ap = matvec(A, p)).run(exec); | ||
|
||
// pAp = dot(p,Ap) | ||
(pAp = sum(p*Ap)).run(stream); | ||
//(pAp = sum(p*Ap)).run(stream); | ||
(pAp = sum(p*Ap)).run(exec); | ||
|
||
// if pAp is zero then we have exactly numerically converged. | ||
// However, this is batched so we may iterate more. Iterating | ||
|
@@ -152,10 +158,12 @@ namespace matx | |
auto updateOp = ( r1 = r0 - (r0r0c/pApc) * Ap, | ||
X = X + (r0r0c/pApc) * p); | ||
|
||
(IF( pApc != value_type(0), updateOp)).run(stream); | ||
//(IF( pApc != value_type(0), updateOp)).run(stream); | ||
(IF( pApc != value_type(0), updateOp)).run(exec); | ||
|
||
// r1r1 = dot(r1, r1) | ||
(r1r1 = sum(r1*r1)).run(stream); | ||
//(r1r1 = sum(r1*r1)).run(stream); | ||
(r1r1 = sum(r1*r1)).run(exec); | ||
|
||
if(tol>0.0f) { | ||
// copy convergence criteria to host. | ||
|
@@ -168,15 +176,17 @@ namespace matx | |
break; | ||
} | ||
|
||
(converged = matx::all(as_int(sqrt(r1r1) < tol))).run(stream); | ||
//(converged = matx::all(as_int(sqrt(r1r1) < tol))).run(stream); | ||
(converged = matx::all(as_int(sqrt(r1r1) < tol))).run(exec); | ||
|
||
cudaEventRecord(event, stream); | ||
cudaStreamWaitEvent(d2h, event); | ||
} | ||
|
||
// p = r1 + b * p | ||
auto updateP = ( p = r1 + (r1r1c/r0r0c) * p); | ||
(IF( pApc != value_type(0), updateP)).run(stream); | ||
//(IF( pApc != value_type(0), updateP)).run(stream); | ||
(IF( pApc != value_type(0), updateP)).run(exec); | ||
|
||
// Advance residual | ||
swap(r0r0, r1r1); | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What is
finalize
used for vs sync
? Could you hide the context in the executor so the user doesn't need it, and have calling exec.sync()
call finalize()
? There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
finalize terminates everything in the STF context: it waits for asynchronous tasks, deletes internal resources, etc. You can only do it once. sync is more equivalent to a ctx.task_fence(), which is a non-blocking fence (it returns a CUDA stream, and waiting for that stream means everything was done).
I'd like to move finalize to the dtor of the executor, but there are some caveats if you define the executor as a static variable, is this allowed ? The caveat might be some inappropriate unload ordering of CUDA and STF libraries as usual ...
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sounds good. I think the destructor is the right place. But does sync() work as expected?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@sidelnik is it doing a task fence with a stream sync?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@caugonnet , sync() should be calling ctx.task_fence() now. I agree, I think we should place the ctx.finalize() inside the stf executor dtor