Merge pull request #1721 from CEED/jeremy/shared-nontensor
Add non-tensor shared
jeremylt authored Jan 9, 2025
2 parents 514f0e7 + 1f6c24f commit 1a63be7
Showing 18 changed files with 1,156 additions and 189 deletions.
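For orientation, this change teaches the /gpu/cuda/shared backend to build and apply bases created through CeedBasisCreateH1 (non-tensor H1 elements), registering its own Apply/ApplyAdd kernels for them. Below is a minimal user-level sketch of exercising that path, assuming the standard libCEED public API; the P1-triangle sizes and zero-filled matrices are placeholders for illustration, not data from this PR.

#include <ceed.h>

int main(void) {
  Ceed       ceed;
  CeedBasis  basis;
  CeedVector u, v;
  // Hypothetical P1 triangle: 3 nodes, 4 quadrature points, 1 component
  CeedInt    num_comp = 1, num_nodes = 3, num_qpts = 4;
  CeedScalar interp[4 * 3]   = {0};  // num_qpts x num_nodes; filled from a real quadrature rule in practice
  CeedScalar grad[2 * 4 * 3] = {0};  // dim x num_qpts x num_nodes
  CeedScalar q_ref[2 * 4]    = {0};  // reference coordinates of the quadrature points
  CeedScalar q_weight[4]     = {0};  // quadrature weights

  CeedInit("/gpu/cuda/shared", &ceed);
  CeedBasisCreateH1(ceed, CEED_TOPOLOGY_TRIANGLE, num_comp, num_nodes, num_qpts, interp, grad, q_ref, q_weight, &basis);

  CeedVectorCreate(ceed, num_nodes, &u);
  CeedVectorCreate(ceed, num_qpts, &v);
  CeedVectorSetValue(u, 1.0);

  // Interpolate nodal values to quadrature points for one element
  CeedBasisApply(basis, 1, CEED_NOTRANSPOSE, CEED_EVAL_INTERP, u, v);

  CeedVectorDestroy(&u);
  CeedVectorDestroy(&v);
  CeedBasisDestroy(&basis);
  CeedDestroy(&ceed);
  return 0;
}

With this PR, such a basis is handled by the shared-memory kernels registered at the end of CeedBasisCreateH1_Cuda_shared (see the diff below).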
202 changes: 184 additions & 18 deletions backends/cuda-shared/ceed-cuda-shared-basis.c
@@ -19,14 +19,7 @@
#include "ceed-cuda-shared.h"

//------------------------------------------------------------------------------
-// Device initalization
-//------------------------------------------------------------------------------
-int CeedInit_CudaInterp(CeedScalar *d_B, CeedInt P_1d, CeedInt Q_1d, CeedScalar **c_B);
-int CeedInit_CudaGrad(CeedScalar *d_B, CeedScalar *d_G, CeedInt P_1d, CeedInt Q_1d, CeedScalar **c_B_ptr, CeedScalar **c_G_ptr);
-int CeedInit_CudaCollocatedGrad(CeedScalar *d_B, CeedScalar *d_G, CeedInt P_1d, CeedInt Q_1d, CeedScalar **c_B_ptr, CeedScalar **c_G_ptr);
-
-//------------------------------------------------------------------------------
-// Apply basis
+// Apply tensor basis
//------------------------------------------------------------------------------
static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add, const CeedInt num_elem, CeedTransposeMode t_mode,
CeedEvalMode eval_mode, CeedVector u, CeedVector v) {
@@ -58,8 +51,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);

-      CeedCallBackend(CeedInit_CudaInterp(data->d_interp_1d, P_1d, Q_1d, &data->c_B));
-      void *interp_args[] = {(void *)&num_elem, &data->c_B, &d_u, &d_v};
+      void *interp_args[] = {(void *)&num_elem, &data->d_interp_1d, &d_u, &d_v};

if (dim == 1) {
// avoid >512 total threads
@@ -104,14 +96,14 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,

CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
-      CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
+      CeedInt     thread_1d = CeedIntMax(Q_1d, P_1d);
+      CeedScalar *d_grad_1d = data->d_grad_1d;

       if (data->d_collo_grad_1d) {
-        CeedCallBackend(CeedInit_CudaCollocatedGrad(data->d_interp_1d, data->d_collo_grad_1d, P_1d, Q_1d, &data->c_B, &data->c_G));
-      } else {
-        CeedCallBackend(CeedInit_CudaGrad(data->d_interp_1d, data->d_grad_1d, P_1d, Q_1d, &data->c_B, &data->c_G));
+        d_grad_1d = data->d_collo_grad_1d;
       }
-      void *grad_args[] = {(void *)&num_elem, &data->c_B, &data->c_G, &d_u, &d_v};
+      void *grad_args[] = {(void *)&num_elem, &data->d_interp_1d, &d_grad_1d, &d_u, &d_v};

if (dim == 1) {
// avoid >512 total threads
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread_1d, 1));
@@ -328,8 +320,7 @@ static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_add,
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);

-      CeedCallBackend(CeedInit_CudaInterp(data->d_chebyshev_interp_1d, P_1d, Q_1d, &data->c_B));
-      void *interp_args[] = {(void *)&num_elem, &data->c_B, &data->d_points_per_elem, &d_x, &d_u, &d_v};
+      void *interp_args[] = {(void *)&num_elem, &data->d_chebyshev_interp_1d, &data->d_points_per_elem, &d_x, &d_u, &d_v};

if (dim == 1) {
// avoid >512 total threads
@@ -364,7 +355,6 @@ static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_add,
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);

-      CeedCallBackend(CeedInit_CudaInterp(data->d_chebyshev_interp_1d, P_1d, Q_1d, &data->c_B));
void *grad_args[] = {(void *)&num_elem, &data->d_chebyshev_interp_1d, &data->d_points_per_elem, &d_x, &d_u, &d_v};

if (dim == 1) {
@@ -424,6 +414,121 @@ static int CeedBasisApplyAddAtPoints_Cuda_shared(CeedBasis basis, const CeedInt
return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply non-tensor basis
//------------------------------------------------------------------------------
static int CeedBasisApplyNonTensorCore_Cuda_shared(CeedBasis basis, bool apply_add, const CeedInt num_elem, CeedTransposeMode t_mode,
CeedEvalMode eval_mode, CeedVector u, CeedVector v) {
Ceed ceed;
Ceed_Cuda *ceed_Cuda;
CeedInt dim;
const CeedScalar *d_u;
CeedScalar *d_v;
CeedBasis_Cuda_shared *data;

CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
CeedCallBackend(CeedGetData(ceed, &ceed_Cuda));
CeedCallBackend(CeedBasisGetData(basis, &data));
CeedCallBackend(CeedBasisGetDimension(basis, &dim));

// Get read/write access to u, v
if (u != CEED_VECTOR_NONE) CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
else CeedCheck(eval_mode == CEED_EVAL_WEIGHT, ceed, CEED_ERROR_BACKEND, "An input vector is required for this CeedEvalMode");
if (apply_add) CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
else CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

// Apply basis operation
switch (eval_mode) {
case CEED_EVAL_INTERP: {
CeedInt P, Q;

CeedCallBackend(CeedBasisGetNumNodes(basis, &P));
CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &Q));
CeedInt thread = CeedIntMax(Q, P);

void *interp_args[] = {(void *)&num_elem, &data->d_interp_1d, &d_u, &d_v};

{
// avoid >512 total threads
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread, 1));
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
CeedInt shared_mem = elems_per_block * thread * sizeof(CeedScalar);

if (t_mode == CEED_TRANSPOSE) {
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, apply_add ? data->InterpTransposeAdd : data->InterpTranspose, grid, thread, 1,
elems_per_block, shared_mem, interp_args));
} else {
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, data->Interp, grid, thread, 1, elems_per_block, shared_mem, interp_args));
}
}
} break;
case CEED_EVAL_GRAD: {
CeedInt P, Q;

CeedCallBackend(CeedBasisGetNumNodes(basis, &P));
CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &Q));
CeedInt thread = CeedIntMax(Q, P);

void *grad_args[] = {(void *)&num_elem, &data->d_grad_1d, &d_u, &d_v};

{
// avoid >512 total threads
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread, 1));
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
CeedInt shared_mem = elems_per_block * thread * sizeof(CeedScalar);

if (t_mode == CEED_TRANSPOSE) {
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, apply_add ? data->GradTransposeAdd : data->GradTranspose, grid, thread, 1,
elems_per_block, shared_mem, grad_args));
} else {
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, data->Grad, grid, thread, 1, elems_per_block, shared_mem, grad_args));
}
}
} break;
case CEED_EVAL_WEIGHT: {
CeedInt Q;

CeedCheck(data->d_q_weight_1d, ceed, CEED_ERROR_BACKEND, "%s not supported; q_weights_1d not set", CeedEvalModes[eval_mode]);
CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &Q));
void *weight_args[] = {(void *)&num_elem, (void *)&data->d_q_weight_1d, &d_v};

{
// avoid >512 total threads
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / Q, 1));
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);

CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, grid, Q, elems_per_block, 1, weight_args));
}
} break;
case CEED_EVAL_NONE: /* handled separately below */
break;
// LCOV_EXCL_START
case CEED_EVAL_DIV:
case CEED_EVAL_CURL:
return CeedError(ceed, CEED_ERROR_BACKEND, "%s not supported", CeedEvalModes[eval_mode]);
// LCOV_EXCL_STOP
}

// Restore vectors, cover CEED_EVAL_NONE
CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
if (eval_mode == CEED_EVAL_NONE) CeedCallBackend(CeedVectorSetArray(v, CEED_MEM_DEVICE, CEED_COPY_VALUES, (CeedScalar *)d_u));
if (eval_mode != CEED_EVAL_WEIGHT) CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
CeedCallBackend(CeedDestroy(&ceed));
return CEED_ERROR_SUCCESS;
}

static int CeedBasisApplyNonTensor_Cuda_shared(CeedBasis basis, const CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode eval_mode,
CeedVector u, CeedVector v) {
CeedCallBackend(CeedBasisApplyNonTensorCore_Cuda_shared(basis, false, num_elem, t_mode, eval_mode, u, v));
return CEED_ERROR_SUCCESS;
}

static int CeedBasisApplyAddNonTensor_Cuda_shared(CeedBasis basis, const CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode eval_mode,
CeedVector u, CeedVector v) {
CeedCallBackend(CeedBasisApplyNonTensorCore_Cuda_shared(basis, true, num_elem, t_mode, eval_mode, u, v));
return CEED_ERROR_SUCCESS;
}
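The non-tensor kernels above use one thread per max(P, Q) for each element and pack several elements into a thread block while keeping the block at or below 512 threads; shared memory holds one scalar of scratch per thread. A standalone sketch of that launch arithmetic, with assumed sizes not taken from the PR (P = 10, Q = 12, maxThreadsDim[2] = 64, 1000 elements, double-precision CeedScalar):

#include <stdio.h>

static int int_max(int a, int b) { return a > b ? a : b; }
static int int_min(int a, int b) { return a < b ? a : b; }

int main(void) {
  int    P = 10, Q = 12, num_elem = 1000, max_threads_dim_z = 64;
  int    thread          = int_max(Q, P);                                        // threads per element
  int    elems_per_block = int_min(max_threads_dim_z, int_max(512 / thread, 1)); // keep <= 512 threads per block
  int    grid            = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
  size_t shared_mem      = (size_t)elems_per_block * thread * sizeof(double);    // one scalar of scratch per thread

  // Prints: thread = 12, elems_per_block = 42, grid = 24, shared_mem = 4032 bytes
  printf("thread = %d, elems_per_block = %d, grid = %d, shared_mem = %zu bytes\n", thread, elems_per_block, grid, shared_mem);
  return 0;
}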

//------------------------------------------------------------------------------
// Destroy basis
//------------------------------------------------------------------------------
@@ -513,3 +618,64 @@ int CeedBasisCreateTensorH1_Cuda_shared(CeedInt dim, CeedInt P_1d, CeedInt Q_1d,
}

//------------------------------------------------------------------------------
// Create non-tensor basis
//------------------------------------------------------------------------------
int CeedBasisCreateH1_Cuda_shared(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp,
const CeedScalar *grad, const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
Ceed ceed;
CeedInt num_comp, q_comp_interp, q_comp_grad;
const CeedInt q_bytes = num_qpts * sizeof(CeedScalar);
CeedBasis_Cuda_shared *data;

CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
CeedCallBackend(CeedCalloc(1, &data));

// Check max sizes
CeedCheck(dim <= 3, ceed, CEED_ERROR_BACKEND, "Backend does not implement nontensor bases with dim > 3");
CeedCheck(num_nodes * num_qpts * dim < 52 * 52 * 3, ceed, CEED_ERROR_BACKEND, "Backend does not implement nontensor bases with P * Q this large");

// Copy basis data to GPU
CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_GRAD, &q_comp_grad));
if (q_weight) {
CeedCallCuda(ceed, cudaMalloc((void **)&data->d_q_weight_1d, q_bytes));
CeedCallCuda(ceed, cudaMemcpy(data->d_q_weight_1d, q_weight, q_bytes, cudaMemcpyHostToDevice));
}
if (interp) {
const CeedInt interp_bytes = q_bytes * num_nodes * q_comp_interp;

CeedCallCuda(ceed, cudaMalloc((void **)&data->d_interp_1d, interp_bytes));
CeedCallCuda(ceed, cudaMemcpy(data->d_interp_1d, interp, interp_bytes, cudaMemcpyHostToDevice));
}
if (grad) {
const CeedInt grad_bytes = q_bytes * num_nodes * q_comp_grad;

CeedCallCuda(ceed, cudaMalloc((void **)&data->d_grad_1d, grad_bytes));
CeedCallCuda(ceed, cudaMemcpy(data->d_grad_1d, grad, grad_bytes, cudaMemcpyHostToDevice));
}

// Compile basis kernels
const char basis_kernel_source[] = "// Non-tensor basis source\n#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor.h>\n";

CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->module, 5, "BASIS_Q", num_qpts, "BASIS_P", num_nodes, "T_1D",
CeedIntMax(num_qpts, num_nodes), "BASIS_DIM", dim, "BASIS_NUM_COMP", num_comp));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Interp", &data->Interp));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "InterpTranspose", &data->InterpTranspose));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "InterpTransposeAdd", &data->InterpTransposeAdd));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Grad", &data->Grad));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "GradTranspose", &data->GradTranspose));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "GradTransposeAdd", &data->GradTransposeAdd));
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Weight", &data->Weight));

CeedCallBackend(CeedBasisSetData(basis, data));

// Register backend functions
CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Cuda_shared));
CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "ApplyAdd", CeedBasisApplyAddNonTensor_Cuda_shared));
CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroy_Cuda_shared));
CeedCallBackend(CeedDestroy(&ceed));
return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
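The device allocation sizes in CeedBasisCreateH1_Cuda_shared follow directly from the dense non-tensor matrices. A small sketch of that arithmetic and of the size guard, with assumed values (P = 20 nodes, Q = 27 points, dim = 3, double-precision CeedScalar, one interp component and dim grad components, the usual counts for an H1 basis):

#include <stdbool.h>
#include <stdio.h>

int main(void) {
  // Assumed sizes, not taken from the PR
  int    num_nodes = 20, num_qpts = 27, dim = 3;
  int    q_comp_interp = 1, q_comp_grad = dim;
  size_t q_bytes      = num_qpts * sizeof(double);
  size_t interp_bytes = q_bytes * num_nodes * q_comp_interp;       // one dense Q x P matrix
  size_t grad_bytes   = q_bytes * num_nodes * q_comp_grad;         // one Q x P matrix per gradient component
  bool   within_limit = num_nodes * num_qpts * dim < 52 * 52 * 3;  // same guard as the backend's CeedCheck

  // Prints: q_weight 216 B, interp 4320 B, grad 12960 B, within limit: yes
  printf("q_weight %zu B, interp %zu B, grad %zu B, within limit: %s\n",
         q_bytes, interp_bytes, grad_bytes, within_limit ? "yes" : "no");
  return 0;
}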
1 change: 1 addition & 0 deletions backends/cuda-shared/ceed-cuda-shared.c
@@ -36,6 +36,7 @@ static int CeedInit_Cuda_shared(const char *resource, Ceed ceed) {
CeedCallBackend(CeedDestroy(&ceed_ref));

CeedCallBackend(CeedSetBackendFunction(ceed, "Ceed", ceed, "BasisCreateTensorH1", CeedBasisCreateTensorH1_Cuda_shared));
CeedCallBackend(CeedSetBackendFunction(ceed, "Ceed", ceed, "BasisCreateH1", CeedBasisCreateH1_Cuda_shared));
CeedCallBackend(CeedSetBackendFunction(ceed, "Ceed", ceed, "Destroy", CeedDestroy_Cuda));
return CEED_ERROR_SUCCESS;
}
3 changes: 3 additions & 0 deletions backends/cuda-shared/ceed-cuda-shared.h
@@ -39,3 +39,6 @@ typedef struct {

CEED_INTERN int CeedBasisCreateTensorH1_Cuda_shared(CeedInt dim, CeedInt P_1d, CeedInt Q_1d, const CeedScalar *interp_1d, const CeedScalar *grad_1d,
const CeedScalar *q_ref_1d, const CeedScalar *q_weight_1d, CeedBasis basis);

+CEED_INTERN int CeedBasisCreateH1_Cuda_shared(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp,
+                                              const CeedScalar *grad, const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis);
53 changes: 0 additions & 53 deletions backends/cuda-shared/kernels/cuda-shared-basis.cu

This file was deleted.
