diff --git a/examples/pytacsadapt/plate/make_plate_geom.py b/examples/pytacsadapt/plate/make_plate_geom.py new file mode 100644 index 0000000..17d6ec9 --- /dev/null +++ b/examples/pytacsadapt/plate/make_plate_geom.py @@ -0,0 +1,166 @@ +""" +A function to generate the plate geometry used for testing +""" + +# imports +import numpy as np +from egads4py import egads + + +def makePlateGeom(width=1.0, height=1.0, npanels=1, makeIGES=False): + """ + Writes a plate.step/.iges geometry file for the planar plate model of the + following form: + __________ __________ + | | | + | | | ^ y + | | | | + |__________|__________| ---> x + + Parameters + ---------- + width : float + the length of the plate geometry along the x-axis + + height : float + the length of the plate geometry along the y-axis + + npanels : int + the number of panels used define the plate geometry along the x-dimension + + makeIGES : bool + boolean flag on whether to return an IGES/STEP geometry + """ + # get the model size info + nverts = 2 * (npanels + 1) + nedges = (3 * npanels) + 1 + nfaces = npanels + + # get the vertex coordinates + nx = npanels + 1 + ny = 2 + x = np.linspace(0, width, nx) + y = np.linspace(0, height, ny) + X, Y = np.meshgrid(x, y) + coords = np.zeros([nverts, 3]) + coords[:, 0] = X.ravel() + coords[:, 1] = Y.ravel() + # print(coords) + + # make the quad node connectivity - used to make the edge and face connectivity + quad_conn = [] + for iface in range(nfaces): + i = iface % 4 + j = iface // 4 + quad_conn.append( + [i + nx * j, (i + 1) + nx * j, (i + 1) + nx * (j + 1), i + nx * (j + 1)] + ) + quad_conn = np.array(quad_conn) + # print(quad_conn) + + # make the edge connectivity + edge_conn = [] + for iedge in range(nfaces * 4): + ipanel = iedge // 4 + local_edge = iedge % 4 + if ipanel > 0 and local_edge == 3: + continue + v1 = quad_conn[ipanel, local_edge] + if local_edge == 3: + v2 = quad_conn[ipanel, 0] + else: + v2 = quad_conn[ipanel, local_edge + 1] + edge_conn.append([v1, v2]) + edge_conn = np.array(edge_conn) + # print(edge_conn) + + # make the face connectivity + face_conn = [] + for iface in range(nfaces): + conn = [] + for iedge in range(4): + v1 = quad_conn[iface, iedge] + if iedge == 3: + v2 = quad_conn[iface, 0] + else: + v2 = quad_conn[iface, iedge + 1] + edge = np.array([v1, v2]) + edge_ind = np.where((edge_conn == edge).all(axis=1))[0] + if edge_ind.size == 0: + edge_ind = np.where((edge_conn == np.flip(edge)).all(axis=1))[0] + conn.append(edge_ind[0]) + face_conn.append(conn) + face_conn = np.array(face_conn) + # print(face_conn) + + # create egads + ctx = egads.context() + ctx.setOutLevel(0) + + # create the node topology + nodes = [] + for i in range(nverts): + nodes.append(ctx.makeTopology(egads.NODE, rdata=coords[i])) + + # create the line geometry + lines = [] + for i in range(nedges): + n1_ind = edge_conn[i, 0] + n2_ind = edge_conn[i, 1] + delta = coords[n2_ind] - coords[n1_ind] + lines.append( + ctx.makeGeometry( + egads.CURVE, mtype=egads.LINE, rdata=[coords[n1_ind], delta] + ) + ) + + # create the edge topology + edges = [] + for i in range(nedges): + n1_ind = edge_conn[i, 0] + n2_ind = edge_conn[i, 1] + delta = coords[n2_ind] - coords[n1_ind] + dist = np.linalg.norm(delta, 2) + edges.append( + ctx.makeTopology( + egads.EDGE, + mtype=egads.TWONODE, + geom=lines[i], + children=[nodes[n1_ind], nodes[n2_ind]], + rdata=[0, dist], + ) + ) + + # create the edge loops + edge_loops = [] + edge_loop_nums = [] + for i in range(nfaces): + e1_ind = face_conn[i, 0] + e2_ind = face_conn[i, 1] + 
e3_ind = face_conn[i, 2] + e4_ind = face_conn[i, 3] + eloop, nloop_edges = ctx.makeLoop( + [edges[e1_ind], edges[e2_ind], edges[e3_ind], edges[e4_ind]] + ) + edge_loops.append(eloop) + edge_loop_nums.append(nloop_edges) + + # create the faces + faces = [] + for i in range(nfaces): + faces.append(ctx.makeFace(edge_loops[i], egads.SFORWARD)) + + # piece it all together and make the model + shell = ctx.makeTopology(egads.SHELL, egads.OPEN, children=faces) + body = ctx.makeTopology(egads.BODY, egads.SHEETBODY, children=[shell]) + model = ctx.makeTopology(egads.MODEL, children=[body]) + fname = "plate" + if makeIGES: + fname += ".iges" + else: + fname += ".step" + model.saveModel(fname, overwrite=True) + return + + +makePlateGeom() diff --git a/examples/pytacsadapt/plate/plate_adapt.py b/examples/pytacsadapt/plate/plate_adapt.py new file mode 100644 index 0000000..2892e1e --- /dev/null +++ b/examples/pytacsadapt/plate/plate_adapt.py @@ -0,0 +1,318 @@ +""" +A simple example to test adaptive analysis with TACS/TMR. + +This example uses a square flat plate, fully fixed on all edges, with a uniform +pressure load. Static analysis is performed, followed by either uniform or +adaptive refinement. Adaptive refinement is based on a selected output of +interest. Various options for this example can be selected through command line +arguments, see below. + +To run a simple adaptive analysis on the KS failure output, do the following: +1. run `python make_plate_geom.py` to generate the square plate geometry file +2. run `mpirun -np 4 python plate_adapt.py --niters 8 --adapt` +3. run `mpirun -np 4 python plate_adapt.py --niters 5` +4. compare the results of adaptive and uniform refinement between step 2 and 3 +""" +import sys +import os +import argparse +from mpi4py import MPI +import numpy as np +from tmr import TMR +from tacs import TACS, elements, constitutive, functions, problems +from tmr.pytacsadapt import * + + +# =============================================================================== +# Local functions +# =============================================================================== +def elemCallBack(order, quad): + # set constitutive properties + rho = 2700.0 + E = 70.0e9 + nu = 0.33 + ys = 276.0e6 + min_thickness = 0.001 + max_thickness = 0.01 + thickness = 0.01 + props = constitutive.MaterialProperties(rho=rho, E=E, nu=nu, ys=ys) + stiff = constitutive.IsoShellConstitutive( + props, t=thickness, tlb=min_thickness, tub=max_thickness + ) + + # create the element + transform = elements.ShellNaturalTransform() + element = None + if order == 2: + element = elements.Quad4Shell(transform, stiff) + elif order == 3: + element = elements.Quad9Shell(transform, stiff) + elif order == 4: + element = elements.Quad16Shell(transform, stiff) + else: + raise ValueError(f"orders {2}-{4} supported") + return element + + +def probCallBack(name, comm, geomLoader, assembler, **kwargs): + # assign keyword arguments + args = kwargs.get("cmd_ln_args") + + # set the output flags + flags = ( + TACS.OUTPUT_CONNECTIVITY + | TACS.OUTPUT_NODES + | TACS.OUTPUT_DISPLACEMENTS + | TACS.OUTPUT_STRAINS + | TACS.OUTPUT_STRESSES + | TACS.OUTPUT_EXTRAS + ) + + # create the output viewer + f5Output = TACS.ToFH5(assembler, TACS.BEAM_OR_SHELL_ELEMENT, flags) + for ind, comp_name in enumerate(geomLoader.getComponentDescripts()): + f5Output.setComponentName(ind, comp_name) + + # set static problem options + static_opts = { + "subSpaceSize": 10, + "nRestarts": 1, + "L2Convergence": 1.0e-9, + "L2ConvergenceRel": 1.0e-12, + 
"useMonitor": True, + "monitorFrequency": 1, + } + + # create a static problem (meshLoader arg defaults to None) + problem = problems.StaticProblem( + name, assembler, comm, outputViewer=f5Output, options=static_opts + ) + + # add potential functions of interest to this problem + problem.addFunction( + funcName="ks_disp", + funcHandle=functions.KSDisplacement, + ftype="continuous", + ksWeight=args.ksweight, + direction=[0.0, 0.0, 1.0], + ) + problem.addFunction( + funcName="ks_fail", + funcHandle=functions.KSFailure, + ftype="continuous", + ksWeight=args.ksweight, + ) + problem.addFunction(funcName="compliance", funcHandle=functions.Compliance) + return problem + + +def initCallBack(coarse_space, **kwargs): + # assign keyword arguments + geom_file = kwargs.get("geom_file") + geom_type = kwargs.get("geom_type") + args = kwargs.get("cmd_ln_args") + + # load the geometry for the coarse model + coarse_space.geomLoader = pyGeometryLoader(coarse_space.comm, geom_type) + coarse_space.geomLoader.readGeometryFile(geom_file) + + # name the face and edges + face_names = {0: "panel"} + edge_names = {0: "south", 1: "east", 2: "north", 3: "west"} + coarse_space.geomLoader.nameGeometricEntities( + face_names=face_names, edge_names=edge_names + ) + + # specify boundary conditions - use defaults for nums and vals to fully fix + bc_names = ["south", "east", "north", "west"] + bc_nums = {} + bc_info = {"type": "edge", "bc_names": bc_names, "bc_nums": bc_nums} + coarse_space.geomLoader.setBoundaryConditions(bc_info) + + # set the meshing options + coarse_space.geomLoader.setMeshOptions(write_mesh_quality_histogram=0) + if not args.adapt: + coarse_space.geomLoader.setMeshOptions(mesh_type_default=TMR.STRUCTURED) + + # create the initial coarse mesh + coarse_space.geomLoader.createMesh(h=args.hinit) + + # create the topology + coarse_space.geomLoader.createTopology(useMesh=True) + + # create the coarse forest + if args.order < 4: + coarse_space.geomLoader.createForest( + order=args.order, interp=TMR.UNIFORM_POINTS + ) + else: + coarse_space.geomLoader.createForest( + order=args.order, interp=TMR.GAUSS_LOBATTO_POINTS + ) + coarse_space.forest = coarse_space.geomLoader.getForest() + return + + +def printroot(msg): + if comm.rank == 0: + print(f"{msg}") + return + + +# =============================================================================== +# Main +# =============================================================================== +if __name__ == "__main__": + # get the MPI communicator + comm = MPI.COMM_WORLD + + # parse input arguments + parser = argparse.ArgumentParser( + description="Mesh refinement test case for a fully-clamped flat plate" + ) + parser.add_argument( + "--hinit", + default=0.25, + type=float, + help="target mesh size used for initial mesh generation", + ) + parser.add_argument( + "--order", default=3, type=int, help="order of elements in the mesh" + ) + parser.add_argument( + "--p", default=-1.0e2, type=float, help="uniform pressure load to apply" + ) + parser.add_argument( + "--output", default="ks_fail", type=str, help="name of output of interest" + ) + parser.add_argument( + "--ksweight", + default=1.0e6, + type=float, + help="weight factor (rho) used for KS-aggregated outputs", + ) + parser.add_argument( + "--niters", + default=0, + type=int, + help="number of refinement iterations to perform", + ) + parser.add_argument( + "--adapt", + default=False, + action="store_true", + help="boolean flag to apply adaptive/uniform mesh refinement", + ) + parser.add_argument( + "--err_tol", + 
default=1.0e-6, + type=float, + help="target output error criterion used for adaptation termination", + ) + parser.add_argument( + "--nmaxlevs", + default=30, + type=int, + help="max number of refinement levels allowed during adaptation", + ) + parser.add_argument( + "--ndecrease", + default=5, + type=int, + help="number of iterations needed to decrease the refinement threshold to 1", + ) + args = parser.parse_args() + for key in vars(args): + printroot(f"args.{key} = {str(getattr(args,key))}") + + # remove old files + if comm.rank == 0: + for item in os.listdir(os.getcwd()): + fname, fext = os.path.splitext(item) + if fext in [".f5", ".plt"]: + os.remove(os.path.join(os.getcwd(), item)) + + # create the adaptive model + model = pyTACSAdapt( + comm, + initCallBack, + elemCallBack, + probCallBack, + args.output.lower(), + error_tol=args.err_tol, + num_max_ref_levels=args.nmaxlevs, + num_decrease_iters=args.ndecrease, + ) + model.setOutputOfInterest(args.output.lower()) + + # initialize the coarse-space model + model.initializeCoarseSpace( + geom_file="plate.step", geom_type="quad", cmd_ln_args=args + ) + + # loop over adaptive iterations + for k in range(args.niters + 1): + printroot(f"starting iteration {k}") + + # set up the coarse-space model + model.setupModel(model_type="coarse", cmd_ln_args=args) + model.coarse.setAuxElements( + geom_labels=["panel"], aux_type="pressure", faceIndex=0, p=args.p + ) + printroot("coarse model set up") + + # solve the coarse primal problem + model.solvePrimal(model_type="coarse", writeSolution=True) + printroot("coarse primal problem solved") + + if args.adapt: + # solve the coarse adjoint problem + model.solveAdjoint(model_type="coarse", writeSolution=False) + printroot("coarse adjoint problem solved") + + # set up the fine-space model + model.createFineSpace(cmd_ln_args=args) + model.fine.setAuxElements( + geom_labels=["panel"], aux_type="pressure", faceIndex=0, p=args.p + ) + printroot("fine model set up") + + # get the interpolated state in the fine space + model.interpolateField(field_type="state", writeSolution=False) + printroot("fine state interpolated") + + # do the high-order reconstruction of the adjoint (computes the + # difference between the reconstructed and interpolated fine-space adjoints) + model.reconstructField( + field_type="adjoint", compute_diff=True, writeSolution=False + ) + printroot("fine adjoint reconstructed") + + # do the adjoint-based element-wise error estimation + ( + error_estimate, + output_correction, + element_errors, + ) = model.estimateOutputError() + printroot("output error estimated") + + if error_estimate <= args.err_tol: + # end early if error tolerance is satisfied + printroot(f"target error tolerance met") + break + elif k < (args.niters): + # do the adaptation if not yet satisfied + printroot("applying adaptive refinement") + model.adaptModelDecreasingThreshold(element_errors) + + # otherwise do uniform mesh refinement + else: + if k < (args.niters): + printroot("applying uniform refinement") + model.coarse.applyRefinement() + + # print out final information + printroot(f"mesh_history: {model.mesh_history}") + printroot(f"output_history: {model.output_history}") + printroot(f"error_history: {model.error_history}") + printroot(f"adaptation_history: {model.adaptation_history}") diff --git a/src/TMR_RefinementTools.cpp b/src/TMR_RefinementTools.cpp index 45d5185..7a530e8 100644 --- a/src/TMR_RefinementTools.cpp +++ b/src/TMR_RefinementTools.cpp @@ -210,7 +210,7 @@ static TacsScalar computeJacobianTrans2D(const 
TacsScalar Xpts[], // Compute the cross-product with the normal crossProduct(&Xd[0], &Xd[3], &Xd[6]); - vec3Scale(1.0 / sqrt(vec3Dot(&Xd[6], &Xd[6])), &Xd[6]); + vec3Normalize(&Xd[6]); // Compute the transpose of the Jacobian transformation return inv3x3(Xd, J); @@ -713,7 +713,7 @@ static void computeElemRecon2D(const int vars_per_node, TMRQuadForest *forest, d1[0] = Xd[0]; d1[1] = Xd[1]; d1[2] = Xd[2]; - vec3Scale(1.0 / sqrt(vec3Dot(d1, d1)), d1); + vec3Normalize(d1); // Compute d2 = n x d1 crossProduct(&Xd[6], d1, d2); @@ -2835,7 +2835,9 @@ void createPartUnityVector(TMRQuadForest *pu, TACSBVec **pu_vec) { } /* - Refine the mesh using the original solution and the adjoint solution + Estimate error in a functional output using an adjoint-weighted residual. + + Assumes steady solution, transient problems not yet supported. input: forest: the forest of quadtrees @@ -2847,186 +2849,212 @@ void createPartUnityVector(TMRQuadForest *pu, TACSBVec **pu_vec) { solutions computed in some manner output: - adj_corr: adjoint-based functional correction + node_error: the node-wise error indicators + elem_error: the localized element-wise error indicators + adj_corr: adjoint-based functional output correction returns: - absolute functional error estimate + absolute functional output error estimate */ double TMR_AdjointErrorEst(TMRQuadForest *forest, TACSAssembler *tacs, TMRQuadForest *forest_refined, TACSAssembler *tacs_refined, TACSBVec *solution_refined, - TACSBVec *adjoint_refined, double *error, - double *adj_corr) { - /* - // The maximum number of nodes - const int max_num_nodes = MAX_ORDER*MAX_ORDER; - - // Get the number of variables per node - const int vars_per_node = tacs->getVarsPerNode(); - - // Get the order of the mesh and the number of enrichment shape functions - const double *refined_knots; - const int refined_order = forest_refined->getInterpKnots(&refined_knots); - const int num_refined_nodes = refined_order*refined_order; - - // Perform a local refinement of the nodes based on the strain energy - // within each element - const int nelems = tacs->getNumElements(); - - // Get the communicator - MPI_Comm comm = tacs->getMPIComm(); - - // Allocate the element arrays needed for the reconstruction - TacsScalar *vars_interp = new TacsScalar[ vars_per_node*num_refined_nodes ]; - TacsScalar *adj_interp = new TacsScalar[ vars_per_node*num_refined_nodes ]; - - // Allocate the nodal error estimates array - TacsScalar *err = new TacsScalar[ num_refined_nodes ]; - TacsScalar pu_err[4]; - - // Keep track of the total error remaining from each element - // indicator and the adjoint error correction - TacsScalar total_adjoint_corr = 0.0; - - // Create the partition of unity - TMRQuadForest *pu = NULL; - if (forest->getMeshOrder() > 2){ - pu = forest->duplicate(); - pu->incref(); - pu->setMeshOrder(2); - pu->createNodes(); - } - else { - pu = forest; - pu->incref(); - } - - // Create a vector for the predicted nodal errors. 
- TACSBVec *nodal_error; - createPartUnityVector(pu, &nodal_error); - nodal_error->incref(); - - // Distribute the values for the adjoint/solution - solution_refined->beginDistributeValues(); - adjoint_refined->beginDistributeValues(); - solution_refined->endDistributeValues(); - adjoint_refined->endDistributeValues(); + TACSBVec *adjoint_refined, double *node_error, + double *elem_error, double *adj_corr) { + // The maximum number of nodes + const int max_num_nodes = MAX_ORDER * MAX_ORDER; - // Get the auxiliary elements (surface tractions) associated with - // the element class - TACSAuxElements *aux_elements = tacs_refined->getAuxElements(); - int num_aux_elems = 0; - TACSAuxElem *aux = NULL; - if (aux_elements){ - aux_elements->sort(); - num_aux_elems = aux_elements->getAuxElements(&aux); - } + // Get the number of variables per node + const int vars_per_node = tacs->getVarsPerNode(); - // Get the partition of unity connectivity - const int *pu_conn; - pu->getNodeConn(&pu_conn); + // Get the order of the mesh and the number of enrichment shape functions + const double *refined_knots; + const int refined_order = forest_refined->getInterpKnots(&refined_knots); + const int num_refined_nodes = refined_order * refined_order; - // Compute the residual on the refined mesh with the interpolated - // variables. - int aux_count = 0; - for ( int elem = 0; elem < nelems; elem++ ){ - // Set the simulation time - double time = 0.0; + // Get the number of elements in the coarse/refined mesh + const int nelems = tacs->getNumElements(); - // Get the node numbers for the refined mesh - int refine_len = 0; - const int *refine_nodes; - tacs_refined->getElement(elem, &refine_nodes, &refine_len); + // Get the communicator + MPI_Comm comm = tacs->getMPIComm(); - // Get the node locations - TacsScalar Xpts[3*max_num_nodes]; - TACSElement *element = tacs_refined->getElement(elem, Xpts); + // Allocate the element arrays needed for the adjoint-weighted residual + TacsScalar *vars_refined = new TacsScalar[vars_per_node * num_refined_nodes]; + TacsScalar *dvars_refined = new TacsScalar[vars_per_node * num_refined_nodes]; + TacsScalar *ddvars_refined = + new TacsScalar[vars_per_node * num_refined_nodes]; + TacsScalar *adj_refined = new TacsScalar[vars_per_node * num_refined_nodes]; + TacsScalar *res_refined = new TacsScalar[vars_per_node * num_refined_nodes]; + memset(dvars_refined, 0, + vars_per_node * num_refined_nodes * sizeof(TacsScalar)); + memset(ddvars_refined, 0, + vars_per_node * num_refined_nodes * sizeof(TacsScalar)); + + // Allocate arrays for storing the nodal error esimates and nodal + // element counts - updated per element + TacsScalar *err = new TacsScalar[num_refined_nodes]; + TacsScalar *weights = new TacsScalar[num_refined_nodes]; + + // Keep track of the total output functional error estimate and the total + // output functional correction terms + TacsScalar total_error_est = 0.0; + TacsScalar total_output_corr = 0.0; + + // Create a distributed vector for the nodal error estimates + TACSNodeMap *refined_map = tacs_refined->getNodeMap(); + TACSBVecDistribute *refined_dist = tacs_refined->getBVecDistribute(); + TACSBVecDepNodes *refined_dep_nodes = tacs_refined->getBVecDepNodes(); + TACSBVec *nodal_error = + new TACSBVec(refined_map, 1, refined_dist, refined_dep_nodes); + nodal_error->incref(); + nodal_error->zeroEntries(); + + // Create a distributed vector for the element count of each node + // This is used for error localization from the nodes to the elements + TACSBVec *nodal_weights = + 
new TACSBVec(refined_map, 1, refined_dist, refined_dep_nodes); + nodal_weights->incref(); + computeLocalWeights(tacs_refined, nodal_weights); + + // Distribute the values for the adjoint/solution + solution_refined->beginDistributeValues(); + adjoint_refined->beginDistributeValues(); + solution_refined->endDistributeValues(); + adjoint_refined->endDistributeValues(); + + // Get the auxiliary elements defined in the problem + TACSAuxElements *aux_elements = tacs_refined->getAuxElements(); + int num_aux_elems = 0; + TACSAuxElem *aux = NULL; + if (aux_elements) { + aux_elements->sort(); + num_aux_elems = aux_elements->getAuxElements(&aux); + } - // Get the adjoint variables for this element - solution_refined->getValues(refine_len, refine_nodes, vars_interp); - adjoint_refined->getValues(refine_len, refine_nodes, adj_interp); + // Compute the nodal error estimates using an adjoint-weighted residual for + // each element on this proc + int aux_count = 0; + for (int elem = 0; elem < nelems; elem++) { + // Set the simulation time + double time = 0.0; - // Compute the localized error on the refined mesh - memset(err, 0, element->getNumNodes()*sizeof(TacsScalar)); - element->addLocalizedError(time, err, adj_interp, - Xpts, vars_interp); + // Get the node numbers for this element in the refined mesh + int nnodes = 0; + const int *node_inds; + tacs_refined->getElement(elem, &nnodes, &node_inds); - // Add the contribution from any forces - while (aux_count < num_aux_elems && aux[aux_count].num == elem){ - aux[aux_count].elem->addLocalizedError(time, err, adj_interp, - Xpts, vars_interp); - aux_count++; + // Get the node locations for this element + TacsScalar Xpts[3 * max_num_nodes]; + TACSElement *element = tacs_refined->getElement(elem, Xpts); + + // Get the state and adjoint variables for this element + solution_refined->getValues(nnodes, node_inds, vars_refined); + adjoint_refined->getValues(nnodes, node_inds, adj_refined); + + // Compute the residual on the element + memset(err, 0, nnodes * sizeof(TacsScalar)); + memset(res_refined, 0, nnodes * vars_per_node * sizeof(TacsScalar)); + element->addResidual(elem, time, Xpts, vars_refined, dvars_refined, + ddvars_refined, res_refined); + + // Compute the nodal error estimates with the adjoint-weighted residual + for (int inode = 0; inode < nnodes; inode++) { + for (int ivar = 0; ivar < vars_per_node; ivar++) { + int ind = vars_per_node * inode + ivar; + err[inode] += -1. * (res_refined[ind] * adj_refined[ind]); } + } - // Add up the total adjoint correction - for ( int j = 0; j < 2; j++ ){ - for ( int i = 0; i < 2; i++ ){ - int node = (i*(refined_order-1) + - j*refined_order*(refined_order-1)); - total_adjoint_corr += TacsRealPart(err[node]); - pu_err[i + 2*j] = err[node]; + // Add the contribution from any loads - opposite sign for error + // contribution + while (aux_count < num_aux_elems && aux[aux_count].num == elem) { + memset(res_refined, 0, nnodes * vars_per_node * sizeof(TacsScalar)); + aux[aux_count].elem->addResidual(elem, time, Xpts, vars_refined, + dvars_refined, ddvars_refined, + res_refined); + for (int inode = 0; inode < nnodes; inode++) { + for (int ivar = 0; ivar < vars_per_node; ivar++) { + int ind = vars_per_node * inode + ivar; + err[inode] += 1. 
* (res_refined[ind] * adj_refined[ind]); } } - - // Add the contributions to the nodal error - nodal_error->setValues(4, &pu_conn[4*elem], pu_err, TACS_ADD_VALUES); + aux_count++; } - // Finish setting the values into the nodal error array - nodal_error->beginSetValues(TACS_ADD_VALUES); - nodal_error->endSetValues(TACS_ADD_VALUES); + // Add the nodal errors to the total output functional correction term + for (int i = 0; i < nnodes; i++) total_output_corr += TacsRealPart(err[i]); - // Distribute the values back to all nodes - nodal_error->beginDistributeValues(); - nodal_error->endDistributeValues(); + // Add the nodal errors from this element to the nodal error vector + nodal_error->setValues(nnodes, node_inds, err, TACS_ADD_VALUES); + } - // Finish setting the values into the array - double total_error_remain = 0.0; - for ( int elem = 0; elem < nelems; elem++ ){ - // Get the error for this mesh - nodal_error->getValues(4, &pu_conn[4*elem], pu_err); + // Finish setting the values into the nodal error array + nodal_error->beginSetValues(TACS_ADD_VALUES); + nodal_error->endSetValues(TACS_ADD_VALUES); - // Compute the element indicator error as a function of the nodal - // error estimate. - error[elem] = 0.0; - for ( int i = 0; i < 4; i++ ){ - error[elem] += TacsRealPart(pu_err[i]); - } - error[elem] = 0.25*fabs(error[elem]); - } + // Distribute the values back to all nodes + nodal_error->beginDistributeValues(); + nodal_error->endDistributeValues(); - // Add up the absolute value of the total nodal error contributions - TacsScalar *nerr; - int size = nodal_error->getArray(&nerr); - for ( int i = 0; i < size; i++ ){ - total_error_remain += fabs(TacsRealPart(nerr[i])); + // Localize the error from the nodes to the elements with an equal split + for (int elem = 0; elem < nelems; elem++) { + // Get the node numbers for this element in the refined mesh + int nnodes = 0; + const int *node_inds; + tacs_refined->getElement(elem, &nnodes, &node_inds); + + // Get the errors and element counts for the nodes of this element + nodal_error->getValues(nnodes, node_inds, err); + nodal_weights->getValues(nnodes, node_inds, weights); + + // Compute the element indicator error as a function of the nodal + // error estimate. 
+ elem_error[elem] = 0.0; + for (int i = 0; i < nnodes; i++) { + if (node_inds[i] < 0) { + // skip dependent nodes - handled in beginSetValues() with dep weights + continue; + } + elem_error[elem] += fabs(TacsRealPart(err[i])) / weights[i]; } + } - // Sum up the contributions across all processors - double temp[2]; - temp[0] = total_error_remain; - temp[1] = total_adjoint_corr; - MPI_Allreduce(MPI_IN_PLACE, temp, 2, MPI_DOUBLE, MPI_SUM, comm); - total_error_remain = temp[0]; - total_adjoint_corr = temp[1]; - - // Free the data that is no longer required - delete [] vars_interp; - delete [] adj_interp; - delete [] err; - pu->decref(); - nodal_error->decref(); + // Add up the absolute value of the nodal errors to get the output + // functional error + TacsScalar *nerr; + int size = nodal_error->getArray(&nerr); + memcpy(node_error, nerr, size * sizeof(double)); + for (int i = 0; i < size; i++) { + total_error_est += fabs(TacsRealPart(nerr[i])); + } - // Set the adjoint residual correction - if (adj_corr){ - *adj_corr = total_adjoint_corr; - } + // Sum up the contributions across all processors + double temp[2]; + temp[0] = total_error_est; + temp[1] = total_output_corr; + MPI_Allreduce(MPI_IN_PLACE, temp, 2, MPI_DOUBLE, MPI_SUM, comm); + total_error_est = temp[0]; + total_output_corr = temp[1]; + + // Free the data that is no longer required + delete[] vars_refined; + delete[] dvars_refined; + delete[] ddvars_refined; + delete[] adj_refined; + delete[] res_refined; + delete[] err; + delete[] weights; + nodal_error->decref(); + nodal_weights->decref(); + + // Set the adjoint residual correction + if (adj_corr) { + *adj_corr = total_output_corr; + } - // Return the error - return total_error_remain; - */ - return 0.0; + // Return the error + return total_error_est; } /* diff --git a/src/TMR_RefinementTools.h b/src/TMR_RefinementTools.h index 57442e2..7616b49 100644 --- a/src/TMR_RefinementTools.h +++ b/src/TMR_RefinementTools.h @@ -91,8 +91,8 @@ double TMR_AdjointErrorEst(TMRQuadForest *forest, TACSAssembler *tacs, TMRQuadForest *forest_refined, TACSAssembler *tacs_refined, TACSBVec *solution_refined, - TACSBVec *adjoint_refined, double *error, - double *adj_corr); + TACSBVec *adjoint_refined, double *node_error, + double *elem_error, double *adj_corr); double TMR_AdjointErrorEst(TMROctForest *forest, TACSAssembler *tacs, TMROctForest *forest_refined, TACSAssembler *tacs_refined, diff --git a/src/interfaces/TMREgads.cpp b/src/interfaces/TMREgads.cpp index 7d0ed89..acbbfae 100644 --- a/src/interfaces/TMREgads.cpp +++ b/src/interfaces/TMREgads.cpp @@ -379,6 +379,7 @@ void TMR_EgadsFace::getFaceObject(ego *f) { *f = face; } Create the TMRModel by loading in an EGADS model file */ TMRModel *TMR_EgadsInterface::TMR_LoadModelFromEGADSFile(const char *filename, + const char *units, int print_level) { // Create the common context for all egads objects TMR_EgadsContext *ctx = new TMR_EgadsContext(); @@ -386,7 +387,7 @@ TMRModel *TMR_EgadsInterface::TMR_LoadModelFromEGADSFile(const char *filename, // Load in the egads model from the file int flags = 0; ego model; - int icode = EG_loadModel(ctx->getContext(), flags, filename, &model); + int icode = EG_loadModel(ctx->getContext(), flags, filename, units, &model); if (icode != 0) { fprintf(stderr, "TMR: Error reading egads file with error code %d\n", icode); diff --git a/src/interfaces/TMREgads.h b/src/interfaces/TMREgads.h index 710b54d..0c81e2d 100644 --- a/src/interfaces/TMREgads.h +++ b/src/interfaces/TMREgads.h @@ -106,7 +106,8 @@ class TMR_EgadsFace 
: public TMRFace { /* Initialization of the OpenCascade geometry from an IGES/STEP files */ -TMRModel *TMR_LoadModelFromEGADSFile(const char *filename, int print_level = 0); +TMRModel *TMR_LoadModelFromEGADSFile(const char *filename, const char *units, + int print_level = 0); TMRModel *TMR_ConvertEGADSModel(ego model, int print_level = 0); } // namespace TMR_EgadsInterface diff --git a/src/interfaces/TMROpenCascade.cpp b/src/interfaces/TMROpenCascade.cpp index 5f596c7..ea2b269 100644 --- a/src/interfaces/TMROpenCascade.cpp +++ b/src/interfaces/TMROpenCascade.cpp @@ -466,10 +466,290 @@ void TMR_RemoveFloatingShapes(TopoDS_Compound &compound, compound = new_compound; } +/* + Sew the faces of a loaded compound +*/ +void TMR_SewCompound(TopoDS_Compound &compound, int print_level, double sew_tol, + bool nonmanifold_mode) { + // get the shapes from the original compound + TopTools_IndexedMapOfShape verts, edges, wires, faces, shells, solids; + TopExp::MapShapes(compound, TopAbs_VERTEX, verts); + TopExp::MapShapes(compound, TopAbs_EDGE, edges); + TopExp::MapShapes(compound, TopAbs_WIRE, wires); + TopExp::MapShapes(compound, TopAbs_FACE, faces); + TopExp::MapShapes(compound, TopAbs_SHELL, shells); + TopExp::MapShapes(compound, TopAbs_SOLID, solids); + + // create the sewing object and set options + BRepBuilderAPI_Sewing *sew = + new BRepBuilderAPI_Sewing(sew_tol, true, true, true, nonmanifold_mode); + + // add the compound to be sewn + sew->Add(compound); + + // do the sewing/analysis + sew->Perform(); + + // check if anything was done + if ((print_level > 0) && + (sew->IsModified(compound) || sew->IsModifiedSubShape(compound))) { + // print an update + printf( + "Original compound:\nnverts=%i, nedge=%i, nwires=%i, nfaces=%i, " + "nshells=%i, nsolids=%i\n", + verts.Extent(), edges.Extent(), wires.Extent(), faces.Extent(), + shells.Extent(), solids.Extent()); + printf(" Found %i free edges\n", sew->NbFreeEdges()); + printf(" Found %i multiple edges\n", sew->NbMultipleEdges()); + printf(" Found %i contiguous edges\n", sew->NbContigousEdges()); + printf(" Found %i degenerated shapes\n", sew->NbDegeneratedShapes()); + printf("Loaded compound was sewn with tolerance=%.3e\n", sew_tol); + + // set the updated compound + compound = TopoDS::Compound(sew->SewedShape()); + } else if (print_level > 0) { + printf("Loaded compound was not sewn\n"); + } + delete sew; + return; +} + +void TMR_SewModelIGES(char *filename, const char *units, int print_level, + double sew_tol, bool nonmanifold_mode) { + // create the file reader + IGESControl_Reader reader; + Interface_Static::SetCVal("xstep.cascade.unit", units); + IFSelect_ReturnStatus status = reader.ReadFile(filename); + if (status != IFSelect_RetDone) { + fprintf(stderr, "TMR Warning: IGES file reader failed\n"); + } + + // inspect the root transfers + reader.PrintCheckLoad(Standard_False, IFSelect_ItemsByEntity); + + // The number of root objects for transfer + int nroots = reader.NbRootsForTransfer(); + for (int k = 1; k <= nroots; k++) { + Standard_Boolean ok = reader.TransferOneRoot(k); + if (!ok) { + fprintf(stderr, "TMR Warning: Transfer %d not OK!\n", k); + } + } + + // Load the different shapes + int nbs = reader.NbShapes(); + + // Build the compound shape + TopoDS_Compound compound; + BRep_Builder builder; + builder.MakeCompound(compound); + for (int i = 1; i <= nbs; i++) { + TopoDS_Shape shape = reader.Shape(i); + builder.Add(compound, shape); + } + + // do the sewing + TMR_SewCompound(compound, print_level, sew_tol, nonmanifold_mode); + + // get the 
new filename + char new_file[512]; + int len = strlen(filename); + char *ext = strrchr(filename, '.'); // get the file extension + int ext_len = strlen(ext); + int val = snprintf(new_file, len - ext_len + 1, "%s", + filename); // add original name + strcat(new_file, "_sewn"); // add the sewn tag + strcat(new_file, ext); // add the file extension + + // create the file writer + IGESControl_Writer writer; + Interface_Static::SetCVal("write.iges.unit", units); + + // add the compound + Standard_Boolean ok = writer.AddShape(compound); + if (!ok) { + fprintf(stderr, "TMR Warning: adding compound not OK!\n"); + } + + // write out the new IGES file + ok = writer.Write(new_file); + if (!ok) { + fprintf(stderr, "TMR Warning: writing %s failed!\n", new_file); + } + return; +} + +void TMR_SewModelSTEP(char *filename, const char *units, int print_level, + double sew_tol, bool nonmanifold_mode) { + // create the file reader + STEPControl_Reader reader; + Interface_Static::SetCVal("xstep.cascade.unit", units); + IFSelect_ReturnStatus status = reader.ReadFile(filename); + if (status != IFSelect_RetDone) { + fprintf(stderr, "TMR Warning: STEP file reader failed\n"); + } + + // inspect the root transfers + reader.PrintCheckLoad(Standard_False, IFSelect_ItemsByEntity); + + // The number of root objects for transfer + int nroots = reader.NbRootsForTransfer(); + for (int k = 1; k <= nroots; k++) { + Standard_Boolean ok = reader.TransferRoot(k); + if (!ok) { + fprintf(stderr, "TMR Warning: Transfer %d not OK!\n", k); + } + } + + // Load the different shapes + int nbs = reader.NbShapes(); + + // Build the shape + TopoDS_Compound compound; + BRep_Builder builder; + builder.MakeCompound(compound); + for (int i = 1; i <= nbs; i++) { + TopoDS_Shape shape = reader.Shape(i); + builder.Add(compound, shape); + } + + TMR_RemoveFloatingShapes(compound, TopAbs_EDGE); + + // do the sewing + TMR_SewCompound(compound, print_level, sew_tol, nonmanifold_mode); + + // get the new filename + char new_file[512]; + int len = strlen(filename); + char *ext = strrchr(filename, '.'); // get the file extension + int ext_len = strlen(ext); + int val = snprintf(new_file, len - ext_len + 1, "%s", + filename); // add original name + strcat(new_file, "_sewn"); // add the sewn tag + strcat(new_file, ext); // add the file extension + + // create the file writer + STEPControl_Writer writer; + Interface_Static::SetCVal("write.step.unit", units); + + // add the compound + IFSelect_ReturnStatus stat = writer.Transfer(compound, STEPControl_AsIs); + if (stat != IFSelect_RetVoid && stat != IFSelect_RetDone) { + fprintf(stderr, "TMR Warning: error transferring compound!\n"); + } + + // write out the new STEP file + stat = writer.Write(new_file); + if (stat != IFSelect_RetVoid && stat != IFSelect_RetDone) { + fprintf(stderr, "TMR Warning: writing %s failed!\n", new_file); + } + return; +} + +void TMR_FixCompound(TopoDS_Compound &compound, int print_level, double tol) { + // create the shape fixer and set parameters + ShapeFix_Shape *shape_fixer = new ShapeFix_Shape(compound); + shape_fixer->SetPrecision(tol); + + // apply the shape fixing + shape_fixer->Perform(); + + // check the status + if (print_level > 0 && shape_fixer->Status(ShapeExtend_OK)) { + printf("Loaded compound shape was not fixed. 
Nothing was done\n"); + } else if (print_level > 0 && shape_fixer->Status(ShapeExtend_DONE)) { + printf("Loaded compound shape was fixed with the following changes:\n"); + if (shape_fixer->Status(ShapeExtend_DONE1)) { + printf(" free edges were fixed\n"); + } + if (shape_fixer->Status(ShapeExtend_DONE2)) { + printf(" free wires were fixed\n"); + } + if (shape_fixer->Status(ShapeExtend_DONE3)) { + printf(" free faces were fixed\n"); + } + if (shape_fixer->Status(ShapeExtend_DONE4)) { + printf(" free shells were fixed\n"); + } + if (shape_fixer->Status(ShapeExtend_DONE5)) { + printf(" free solids were fixed\n"); + } + if (shape_fixer->Status(ShapeExtend_DONE6)) { + printf(" shapes in compound were fixed\n"); + } + } else if (print_level > 0 && shape_fixer->Status(ShapeExtend_FAIL)) { + printf("Loaded compound shape failed to be fixed. Error during fixing\n"); + } + + // set the fixed compound shape + if (shape_fixer->Status(ShapeExtend_DONE)) { + compound = TopoDS::Compound(shape_fixer->Shape()); + } + delete shape_fixer; + + // create the wireframe fixer and set parameters + ShapeFix_Wireframe *wire_fixer = new ShapeFix_Wireframe(compound); + wire_fixer->SetPrecision(tol); + wire_fixer->ModeDropSmallEdges() = true; + + // apply the wireframe fixing + wire_fixer->FixWireGaps(); + wire_fixer->FixSmallEdges(); + + // check the status for wire gaps + if (print_level > 0 && wire_fixer->StatusWireGaps(ShapeExtend_OK)) { + printf("Loaded compound wireframe gaps not fixed. No gaps were found\n"); + } else if (print_level > 0 && wire_fixer->StatusWireGaps(ShapeExtend_DONE)) { + printf("Loaded compound wireframe gaps were fixed:\n"); + if (wire_fixer->StatusWireGaps(ShapeExtend_DONE1)) { + printf(" gaps in 3D were fixed\n"); + } + if (wire_fixer->StatusWireGaps(ShapeExtend_DONE2)) { + printf(" gaps in 2D were fixed\n"); + } + } else if (print_level > 0 && wire_fixer->StatusWireGaps(ShapeExtend_FAIL)) { + printf("Loaded compound wireframe gaps failed to be fixed:\n"); + if (wire_fixer->StatusWireGaps(ShapeExtend_FAIL1)) { + printf(" error fixing gaps in 3D\n"); + } + if (wire_fixer->StatusWireGaps(ShapeExtend_FAIL2)) { + printf(" error fixing gaps in 2D\n"); + } + } + + // check the status for small edges + if (print_level > 0 && wire_fixer->StatusSmallEdges(ShapeExtend_OK)) { + printf( + "Loaded compound wireframe edges not fixed. 
No small edges were " + "found\n"); + } else if (print_level > 0 && + wire_fixer->StatusSmallEdges(ShapeExtend_DONE)) { + printf("Loaded compound wireframe edges were fixed:\n"); + if (wire_fixer->StatusSmallEdges(ShapeExtend_DONE1)) { + printf(" small edges were fixed\n"); + } + } else if (print_level > 0 && + wire_fixer->StatusSmallEdges(ShapeExtend_FAIL)) { + printf("Loaded compound wireframe edges failed to be fixed:\n"); + if (wire_fixer->StatusSmallEdges(ShapeExtend_FAIL1)) { + printf(" error fixing small edges\n"); + } + } + + // set the fixed compound shape + if (wire_fixer->StatusWireGaps(ShapeExtend_DONE) || + wire_fixer->StatusSmallEdges(ShapeExtend_DONE)) { + compound = TopoDS::Compound(wire_fixer->Shape()); + } + delete wire_fixer; + return; +} + /* Create the TMRModel based on the IGES input file */ -TMRModel *TMR_LoadModelFromIGESFile(const char *filename, int print_level) { +TMRModel *TMR_LoadModelFromIGESFile(const char *filename, const char *units, + int print_level) { FILE *fp = fopen(filename, "r"); if (!fp) { return NULL; @@ -477,6 +757,7 @@ TMRModel *TMR_LoadModelFromIGESFile(const char *filename, int print_level) { fclose(fp); IGESControl_Reader reader; + Interface_Static::SetCVal("xstep.cascade.unit", units); IFSelect_ReturnStatus status = reader.ReadFile(filename); if (status != IFSelect_RetDone) { fprintf(stderr, "TMR Warning: IGES file reader failed\n"); @@ -514,7 +795,8 @@ TMRModel *TMR_LoadModelFromIGESFile(const char *filename, int print_level) { /* Create the TMRModel based on the STEP input file */ -TMRModel *TMR_LoadModelFromSTEPFile(const char *filename, int print_level) { +TMRModel *TMR_LoadModelFromSTEPFile(const char *filename, const char *units, + int print_level) { FILE *fp = fopen(filename, "r"); if (!fp) { return NULL; @@ -522,6 +804,7 @@ TMRModel *TMR_LoadModelFromSTEPFile(const char *filename, int print_level) { fclose(fp); STEPControl_Reader reader; + Interface_Static::SetCVal("xstep.cascade.unit", units); IFSelect_ReturnStatus status = reader.ReadFile(filename); if (status != IFSelect_RetDone) { fprintf(stderr, "TMR Warning: STEP file reader failed\n"); @@ -612,9 +895,9 @@ TMRModel *TMR_LoadModelFromCompound(TopoDS_Compound &compound, if (print_level > 0) { printf( - "Compound loaded with:\nnverts = %d nedges = %d nfaces = %d " - "nwires = %d nshells = %d nsolids = %d\n", - nverts, nedges, nfaces, nwires, nshells, nsolids); + "Compound loaded with:\nnverts=%d, nedges=%d, nwires=%d, " + "nfaces=%d, nshells=%d, nsolids=%d\n", + nverts, nedges, nwires, nfaces, nshells, nsolids); } // Re-iterate through the list and create the objects needed to diff --git a/src/interfaces/TMROpenCascade.h b/src/interfaces/TMROpenCascade.h index 58fd0d3..9c413df 100644 --- a/src/interfaces/TMROpenCascade.h +++ b/src/interfaces/TMROpenCascade.h @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -92,6 +94,7 @@ #include #include #include +#include #include #include #include @@ -172,8 +175,14 @@ class TMR_OCCFace : public TMRFace { /* Initialization of the OpenCascade geometry from an IGES/STEP files */ -TMRModel *TMR_LoadModelFromIGESFile(const char *filename, int print_level = 0); -TMRModel *TMR_LoadModelFromSTEPFile(const char *filename, int print_level = 0); +void TMR_SewModelIGES(char *filename, const char *units, int print_level = 0, + double sew_tol = 1.e-6, bool nonmanifold_mode = false); +void TMR_SewModelSTEP(char *filename, const char *units, int 
print_level = 0, + double sew_tol = 1.e-6, bool nonmanifold_mode = false); +TMRModel *TMR_LoadModelFromIGESFile(const char *filename, const char *units, + int print_level = 0); +TMRModel *TMR_LoadModelFromSTEPFile(const char *filename, const char *units, + int print_level = 0); TMRModel *TMR_LoadModelFromCompound(TopoDS_Compound &compound, int print_level = 0); diff --git a/tmr/TMR.pxd b/tmr/TMR.pxd index 6a5a409..a960e27 100644 --- a/tmr/TMR.pxd +++ b/tmr/TMR.pxd @@ -446,12 +446,14 @@ cdef extern from "TMR_TACSCreator.h": TMROctForest* getFilter() cdef extern from "TMROpenCascade.h": - cdef TMRModel* TMR_LoadModelFromIGESFile(const char*, int) - cdef TMRModel* TMR_LoadModelFromSTEPFile(const char*, int) + cdef void TMR_SewModelIGES(char *, const char *, int, double, bool) + cdef void TMR_SewModelSTEP(char *, const char *, int, double, bool) + cdef TMRModel* TMR_LoadModelFromIGESFile(const char*, const char*, int) + cdef TMRModel* TMR_LoadModelFromSTEPFile(const char*, const char*, int) cdef extern from "TMREgads.h" namespace "TMR_EgadsInterface": cdef TMRModel* TMR_ConvertEGADSModel"TMR_EgadsInterface::TMR_ConvertEGADSModel"(ego, int) - cdef TMRModel* TMR_LoadModelFromEGADSFile"TMR_EgadsInterface::TMR_LoadModelFromEGADSFile"(const char*, int) + cdef TMRModel* TMR_LoadModelFromEGADSFile"TMR_EgadsInterface::TMR_LoadModelFromEGADSFile"(const char*, const char*, int) cdef extern from "TMR_RefinementTools.h": void TMR_CreateTACSMg(int, TACSAssembler**, @@ -467,7 +469,7 @@ cdef extern from "TMR_RefinementTools.h": double*) double TMR_AdjointErrorEst(TMRQuadForest*, TACSAssembler*, TMRQuadForest*, TACSAssembler*, - TACSBVec*, TACSBVec*, double*, double*) + TACSBVec*, TACSBVec*, double*, double*, double*) void TMR_CreateTACSMg(int, TACSAssembler**, TMROctForest**, TACSMg**, double, int, int, int) diff --git a/tmr/TMR.pyx b/tmr/TMR.pyx index f579f9b..9a0fae6 100644 --- a/tmr/TMR.pyx +++ b/tmr/TMR.pyx @@ -3798,7 +3798,38 @@ cdef _init_OctForest(TMROctForest* ptr): forest.ptr.incref() return forest -def LoadModel(fname, int print_lev=0): +def sewModel(file, units="M", int print_level=0, sew_options={}): + """ + Load in a STEP/IGES file, apply a sewing operation with OpenCASCADE, + and write out the sewn model to a new file. 
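+
+    Example (illustrative sketch; assumes a "plate.step" file exists in the
+    working directory, and the option values shown are simply the defaults):
+
+        from tmr import TMR
+        TMR.sewModel("plate.step", units="M", print_level=1,
+                     sew_options={"sew_tol": 1.0e-6, "nonmanifold_mode": False})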
+ + NOTE: Must be run in serial + + Args: + file (str): Name of the geometry file + units (str): Units to use when reading in geometry file + print_lev (int): Print level for operation + sew_options (dict): options for sewing operation on model + """ + cdef char *filename = tmr_convert_str_to_chars(file) + cdef const char *units_chars = tmr_convert_str_to_chars(units) + if file.lower().endswith(('step', 'stp')): + TMR_SewModelSTEP(filename, + units_chars, + print_level, + sew_options.get("sew_tol",1e-6), + sew_options.get("nonmanifold_mode",False)) + elif file.lower().endswith(('iges', 'igs')): + TMR_SewModelIGES(filename, + units_chars, + print_level, + sew_options.get("sew_tol",1e-6), + sew_options.get("nonmanifold_mode",False)) + else: + print("model file extension not supported") + return + +def LoadModel(fname, units="M", int print_lev=0): """ LoadModel(fname, print_lev=0) @@ -3807,22 +3838,25 @@ def LoadModel(fname, int print_lev=0): Args: fname (str): Name of the geometry file + units (str): Units to use when reading in geometry file (IGES/STEP) print_lev (int): Print level for operation Returns: Model: An instance of a Model class """ cdef string sfilename = tmr_convert_str_to_chars(fname) + cdef string sunits = tmr_convert_str_to_chars(units) + cdef const char *units_c = sunits.c_str() cdef const char *filename = NULL cdef TMRModel *model = NULL if fname is not None: filename = sfilename.c_str() if fname.lower().endswith(('step', 'stp')): - model = TMR_LoadModelFromSTEPFile(filename, print_lev) + model = TMR_LoadModelFromSTEPFile(filename, units_c, print_lev) elif fname.lower().endswith(('igs', 'iges')): - model = TMR_LoadModelFromIGESFile(filename, print_lev) + model = TMR_LoadModelFromIGESFile(filename, units_c, print_lev) elif fname.lower().endswith(('egads')): - model = TMR_LoadModelFromEGADSFile(filename, print_lev) + model = TMR_LoadModelFromEGADSFile(filename, units_c, print_lev) if model is NULL: errmsg = 'Error loading model. 
File %s does not exist?'%(fname) raise RuntimeError(errmsg) @@ -4426,58 +4460,79 @@ def adjointError(forest, Assembler coarse, ----------- forest: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` Forest for current mesh level - coarse_assembler: :class:`~TACS.Assembler` - Finite assembler class for associated with forest + coarse: :class:`~TACS.Assembler` + Finite assembler class associated with forest forest_refined: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` Higher-order forest for refined mesh level - refined_assembler: :class:`~TACS.Assembler` - Higher-order finite assembler class for associated with forest_refined - solution_refined: :class:`~TACS.Vec` + refined: :class:`~TACS.Assembler` + Higher-order finite assembler class associated with forest_refined + solution: :class:`~TACS.Vec` The higher-order solution (or approximation) - adjoint_refined: :class:`~TACS.Vec` + adjoint: :class:`~TACS.Vec` The difference between the refined and coarse adjoint solutions computed in some manner Returns ------- - ans: double - Total strain energy error - - err: array of double - Elemental strain energy error + err_est: double + Total error estimate for the output functional adj_corr: TacsScalar - Adjoint-based functional correction + Adjoint-based output functional correction + + err: array of double + Element-wise error indicators """ - cdef TacsScalar ans = 0.0 - cdef TacsScalar adj_corr = 0.0 cdef TMROctForest *oct_forest = NULL cdef TMROctForest *oct_forest_refined = NULL cdef TMRQuadForest *quad_forest = NULL cdef TMRQuadForest *quad_forest_refined = NULL - cdef np.ndarray err = None cdef TacsScalar err_est = 0.0 - err = np.zeros(coarse.ptr.getNumElements(), dtype=np.double) + cdef TacsScalar adj_corr = 0.0 + cdef np.ndarray elem_error = np.zeros(refined.ptr.getNumElements(), dtype=np.double) + cdef np.ndarray node_error = np.zeros(refined.ptr.getNumOwnedNodes(), dtype=np.double) if isinstance(forest, OctForest): oct_forest = (forest).ptr oct_forest_refined = (forest_refined).ptr err_est = TMR_AdjointErrorEst(oct_forest, coarse.ptr, oct_forest_refined, refined.ptr, solution.getBVecPtr(), adjoint.getBVecPtr(), - err.data, &adj_corr) + elem_error.data, &adj_corr) elif isinstance(forest, QuadForest): quad_forest = (forest).ptr quad_forest_refined = (forest_refined).ptr err_est = TMR_AdjointErrorEst(quad_forest, coarse.ptr, quad_forest_refined, refined.ptr, solution.getBVecPtr(), adjoint.getBVecPtr(), - err.data, &adj_corr) - return err_est, adj_corr, err + node_error.data, + elem_error.data, + &adj_corr) + return err_est, adj_corr, elem_error, node_error def computeInterpSolution(forest, Assembler coarse, forest_refined, Assembler refined, - Vec uvec=None, Vec uvec_refined=None, - compute_diff=False): + Vec uvec=None, Vec uvec_refined=None): + """ + Given coarse and fine sets of forests and assemblers, interpolates a field + from the coarse space in the fine space using its higher-order basis. + + Parameters + ----------- + forest: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` + Forest for current mesh level + coarse: :class:`~TACS.Assembler` + Finite assembler class associated with forest + forest_refined: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` + Higher-order forest for refined mesh level + refined: :class:`~TACS.Assembler` + Higher-order finite assembler class associated with forest_refined + uvec: :class:`~TACS.Vec` or None + The coarse-space field to interpolate. If None, the current set of states + in the coarse assembler are used. 
+ uvec_refined: :class:`~TACS.Vec` or None + The fine-space field that is returned. If None, a vector is created by the + fine assembler. + """ cdef TMROctForest *oct_forest = NULL cdef TMROctForest *oct_forest_refined = NULL cdef TMRQuadForest *quad_forest = NULL @@ -4506,6 +4561,34 @@ def computeReconSolution(forest, Assembler coarse, forest_refined, Assembler refined, Vec uvec=None, Vec uvec_refined=None, compute_diff=False): + """ + Given coarse and fine sets of forests and assemblers, reconstruct a field + in the fine-space given a field in the coarse-space. Enriches the field in + the fine-space with higher-order information obtained from local least- + square solutions on each element. + + Parameters + ----------- + forest: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` + Forest for current mesh level + coarse: :class:`~TACS.Assembler` + Finite assembler class associated with forest + forest_refined: :class:`~TMR.OctForest` or :class:`~TMR.QuadForest` + Higher-order forest for refined mesh level + refined: :class:`~TACS.Assembler` + Higher-order finite assembler class associated with forest_refined + uvec: :class:`~TACS.Vec` or None + The coarse-space field to reconstruct. If None, the current set of states + in the coarse assembler are used. + uvec_refined: :class:`~TACS.Vec` or None + The fine-space field that is returned. If None, a vector is created by the + fine assembler. + compute_diff: Bool + If True, uvec_refined is the difference between the high-order + reconstructed field in the fine-space and the coarse-space field that is + interpolated on the fine-space. If False, uvec_refined is just the high- + order reconstructed field in the fine-space. + """ cdef TMROctForest *oct_forest = NULL cdef TMROctForest *oct_forest_refined = NULL cdef TMRQuadForest *quad_forest = NULL diff --git a/tmr/pytacsadapt.py b/tmr/pytacsadapt.py new file mode 100644 index 0000000..f2dd5a7 --- /dev/null +++ b/tmr/pytacsadapt.py @@ -0,0 +1,991 @@ +#!/usr/bin/python +""" +pytacsadapt - A python-based interface for doing output-based error estimation +and mesh adaptation with TACS and TMR. + +This interface is designed to provide an easier-to-use interface to the C++ +layer of TACS and TMR. +""" + +# ============================================================================= +# Imports +# ============================================================================= +import copy +from mpi4py import MPI +import numpy as np +import h5py +from tmr import TMR +from tmr.pygeometryloader import * +from tacs.utilities import BaseUI + + +# ============================================================================= +# pyApproximationSpace +# ============================================================================= +class pyApproximationSpace(BaseUI): + """ + This class defines an approximation space for the finite-element method that + is being applied to the problem of interest. It consists of both TACS- and + TMR-related objects which include: the model geometry, Quad/OctForest, + finite-element assembler, finite-element problem, and copies of necessary + information used in an adaptive solution. + """ + + def __init__(self, comm, _elemCallBack, _probCallBack): + """ + Constructor + + Parameters + ---------- + comm : mpi4py.MPI.Intracomm + The comm object on which to create this approximation space + + _elemCallBack : function handle + Handle to user-defined function for creating elements when making + the TACS Assembler. 
See pyGeometryLoader.createTACSAssembler() + for more details on the function prototype. + + _probCallBack : function handle + Handle to user-defined function for creating the desired TACSProblem + to be used in analysis. This function should create the TACSProblem + and set an output to be used for error estimation and adaptation. + It has the following prototype: + _probCallBack(name, comm, geomLoader, assembler, **kwargs) -> TACSProblem + """ + # Set MPI communicator + BaseUI.__init__(self, comm=comm) + + # initialize the remaining class attributes + self.elemCallBack = _elemCallBack + self.probCallBack = _probCallBack + self.geomLoader = None + self.forest = None + self.assembler = None + self.problem = None + self.state = None + self.adjoint = None + self.refine_iter = 0 + self.prob_name = "" + self.output_name = "" + return + + def createAssembler(self): + """ + Create the TACS Assembler object and the state and adjoint vectors to be + used during the adaptive analysis + """ + self.assembler = self.geomLoader.createTACSAssembler(self.elemCallBack) + self.state = self.assembler.createVec() + self.adjoint = self.assembler.createVec() + return + + def createProblem(self, prob_name, **kwargs): + """ + Creates the TACS problem object to be used for analysis. To apply fixed + loads to the problem, pyApproximationSpace.setAuxElements() can be called + after pyApproximationSpace.createProblem() as many times as necessary. + + Parameters + ---------- + prob_name : str + Name to assign to the problem + """ + # create the problem + self.prob_name = prob_name + self.problem = self.probCallBack( + name=prob_name, + comm=self.comm, + geomLoader=self.geomLoader, + assembler=self.assembler, + **kwargs, + ) + + # check the output of interest is present in the problem + output_names = self.problem.getFunctionKeys() + assert ( + self.output_name in output_names + ), f"Selected output of interest ({self.output_name}) is not in the list of outputs defined in the problem: {output_names}" + return + + def setAuxElements(self, geom_labels, aux_type, **kwargs): + """ + Creates and assigns the auxiliary elements to the assembler and problem. + + Applies identical aux elements to every element that belongs to the + specified geometric entities. + + NOTE: Does not reset the aux elements before adding new ones. Previously + added aux elements will persist unless manually reset. 
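+
+        Example (illustrative sketch; the "panel" label, face index, and
+        pressure value are assumptions mirroring the plate example, and
+        `space` is a hypothetical pyApproximationSpace handle):
+
+            # space: a pyApproximationSpace with an existing assembler/problem
+            space.setAuxElements(geom_labels=["panel"], aux_type="pressure",
+                                 faceIndex=0, p=-1.0e2)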
+ + Parameters + ---------- + geom_labels : list[str] + Labels of geometric entities in the model with which the aux elements + will be added to + + aux_type : str in ('traction', 'pressure', 'inertial', 'centrifugal') + The type of aux element to add + + kwargs : keyword args + arg=value pairs that will directly be passed to the aux element constructor + """ + # check the aux_type is set properly + valid_aux_types = ("traction", "pressure", "inertial", "centrifugal") + aux_type = aux_type.lower() + assert aux_type in valid_aux_types, f"aux_type must be one of {valid_aux_types}" + + for name in geom_labels: + # get the elem ids associated with this geometric entity name + entities = None + if isinstance(self.forest, TMR.QuadForest): + entities = self.forest.getQuadsWithName(name) + elif isinstance(self.forest, TMR.OctForest): + entities = self.forest.getOctsWithName(name) + elem_ids = [] + for entity in entities: + elem_ids.append(entity.tag) # tag for elem id + + # create and add the aux elements + for eid in elem_ids: + # get the element object + elem, _, _, _, _ = self.assembler.getElementData(eid) + + # create the specified aux element + aux = None + if aux_type == "traction": + aux = elem.createElementTraction( + kwargs.get("faceIndex"), kwargs.get("trac") + ) + elif aux_type == "pressure": + aux = elem.createElementPressure( + kwargs.get("faceIndex"), kwargs.get("p") + ) + elif aux_type == "inertial": + aux = elem.createElementInertialForce(kwargs.get("inertiaVec")) + elif aux_type == "centrifugal": + aux = elem.createElementCentrifugalForce( + kwargs.get("omegaVec"), + kwargs.get("rotCenter"), + kwargs.get("firstOrder", False), + ) + + # add to the aux elems in the problem + self.problem.auxElems.addElement(eid, aux) + + # set the aux elems into the assembler too + self.assembler.setAuxElements(self.problem.auxElems) + return + + def applyRefinement( + self, refine_indicator=None, num_min_levels=0, num_max_levels=TMR.MAX_LEVEL + ): + """ + Refine the mesh based on an element refinement indicator. Hanging-node + h-refinement is carried out, followed by a 2:1 balancing such that there + is only one level of difference between adjacent elements. + + Parameters + ---------- + refine_indicator : None or np.ndarray (1D, int) [num_elements] + Refinement indicator array, integer number of levels each element will + be refined or coarsened. If None, uniform refinement of a single level + is applied to the mesh. + + num_min_levels : int + Minimum number of levels the forest is allowed to refine the elements + + num_max_levels : int + Maximum number of levels the forest is allowed to refine the elements + """ + self.forest.refine( + refine_indicator, min_lev=num_min_levels, max_lev=num_max_levels + ) + self.forest.balance(1) + self.forest.repartition() + self.forest.createNodes() + self.refine_iter += 1 + return + + def writeField(self, field, field_name, reset=True): + """ + Sets the problem variables with the supplied field and writes out the + solution data file. Uses the field name to name the file. + Optionally, will reset the problem variables with the current state + field if specified. 
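+
+        Example (illustrative sketch; `space` is a hypothetical
+        pyApproximationSpace handle whose adjoint has already been computed):
+
+            # space: a pyApproximationSpace holding state/adjoint vectors
+            space.writeField(space.adjoint, field_name="adjoint", reset=True)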
+ + Parameters + ---------- + field : TACS Vec or np.ndarray (1D, float) + Vector to set as the problem variables when writing out the file + + field_name : str + Name of the field to add to the end of the file name + + reset : bool + Flag for resetting the problem variables with the current state + """ + # set the field in the problem variables + self.problem.setVariables(field) + self.problem.writeSolution(baseName=f"{self.prob_name}_{field_name}") + + if reset: + # re-write the states back into the problem + self.problem.setVariables(self.state) + return + + +# ============================================================================= +# pyTACSAdapt +# ============================================================================= +class pyTACSAdapt(BaseUI): + """ + This class contains the coarse- and fine-space finite element approximations + that can be used for output-based error estimation and adaptation. This is + the high-level class that should be used for adaptive analysis. + """ + + def __init__( + self, + comm, + _initCallBack, + _elemCallBack, + _probCallBack, + _adapt_output, + adapt_params={}, + ): + """ + Constructor + + Parameters + ---------- + comm : mpi4py.MPI.Intracomm + The comm object on which to create the coarse- and fine-space models + used in the adaptive analysis. + + _initCallBack : function handle + Handle to user-defined function for initializing the adaptive model. + This function should set up the coarse approximation space info by + reading the geometry, making the initial mesh, and setting the + initial Quad/OctForest. + It has the following prototype: + _initializeCallBack(coarse_space, **kwargs) + + _elemCallBack : function handle + Handle to user-defined function for creating elements when making + the TACS Assembler. See pyGeometryLoader.createTACSAssembler() + for more details on the function prototype. + + _probCallBack : function handle + Handle to user-defined function for creating the desired TACSProblem + to be used in analysis. See pyApproximationSpace.__init__() for more + details on the function prototype. 
+ + _adapt_output : str + Name of the output to be used for the adaptive analysis + + adapt_params : dict + arg=value pairs that can be used to specify adaptation parameters + """ + # Set MPI communicator + BaseUI.__init__(self, comm=comm) + + # initialize the class attributes + self.initCallBack = _initCallBack + self.coarse = pyApproximationSpace(comm, _elemCallBack, _probCallBack) + self.fine = pyApproximationSpace(comm, _elemCallBack, _probCallBack) + self.adapt_output = _adapt_output + + # set the adaptation parameters + valid_strategies = ("decreasing_threshold", "fixed_growth") + self.adapt_strategy = adapt_params.get("adapt_strategy", "").lower() + if self.adapt_strategy: + assert ( + self.adapt_strategy in valid_strategies + ), f"adaptation strategy must be one of {valid_strategies}" + # get the general adaptation params + self.num_min_ref_levels = adapt_params.get("num_min_ref_levels", 0) + self.num_max_ref_levels = adapt_params.get( + "num_max_ref_levels", TMR.MAX_LEVEL + ) + self.error_tol = adapt_params.get("error_tol") + assert ( + self.error_tol is not None + ), f"error tolerance must be specified for adaptation" + # get strategy-specific params + if self.adapt_strategy == "decreasing_threshold": + self.num_decrease_iters = adapt_params.get("num_decrease_iters") + assert ( + self.num_decrease_iters is not None + ), f"number of decrease iterations must be specified for adaptation strategy `{self.adapt_strategy}`" + if self.adapt_strategy == "fixed_growth": + self.growth_refine_factor = np.clip( + adapt_params.get("growth_refine_factor", 0.0), 0.0, 1.0 + ) + self.growth_coarsen_factor = np.clip( + adapt_params.get("growth_coarsen_factor", 0.0), 0.0, 1.0 + ) + + # initialize the storage for saving model info + self.mesh_history = {} # mesh size (ndof) + self.output_history = {} # outputs/corrected outputs + self.error_history = {} # error estimates + self.adaptation_history = { + "threshold": {}, # refine/coarsen thresholds + "element_errors": {}, + } # element-wise errors + + # set the output used for adaptation + self.setOutputOfInterest(self.adapt_output) + return + + def initializeCoarseSpace(self, **kwargs): + """ + Initializes the coarse approximation space using the user-defined + initCallBack() function. + + Parameters + ---------- + kwargs : keyword args + arg=value pairs that will directly be passed to self.initCallBack() + """ + self.initCallBack(self.coarse, **kwargs) + return + + def createFineSpace(self, **kwargs): + """ + Creates the "fine" finite-element approximation space by copying the + "coarse" approximation space and incrementing the approximation order + by one. + + NOTE: Interpolation types (node-spacing) may need to be changed between + the 'coarse' and 'fine' space for some high-order elements. 
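# A hedged sketch of constructing the adaptive driver with the adapt_params keys
# parsed above. The callback handles and output name are placeholders for the
# user-defined pieces described in the docstring; the numeric values are
# illustrative only.
adapter = pyTACSAdapt(
    comm,
    _initCallBack=init_callback,
    _elemCallBack=elem_callback,
    _probCallBack=prob_callback,
    _adapt_output="ks_failure",
    adapt_params={
        "adapt_strategy": "fixed_growth",
        "error_tol": 1.0e-3,
        "growth_refine_factor": 0.1,   # refine at most the worst 10% of elements
        "growth_coarsen_factor": 0.0,  # never coarsen
        "num_max_ref_levels": 10,
    },
)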
+ + Parameters + ---------- + kwargs : keyword args + arg=value pairs that will directly be passed to self.setupModel() + """ + # copy the geometry info if necessary + # geometry should be constant during the adaptive analysis + if not self.fine.geomLoader: + self.fine.geomLoader = copy.copy(self.coarse.geomLoader) + + # duplicate the forest, incrementing the order by 1 + # ensure interpolation type for high-order elements is properly set + self.fine.forest = self.coarse.forest.duplicate() + refined_order = self.coarse.forest.getMeshOrder() + 1 + if isinstance(self.fine.forest, TMR.QuadForest): + if refined_order < 4: + self.fine.forest.setMeshOrder(refined_order, TMR.UNIFORM_POINTS) + else: + self.fine.forest.setMeshOrder(refined_order, TMR.GAUSS_LOBATTO_POINTS) + elif isinstance(self.fine.forest, TMR.OctForest): + self.fine.forest.setMeshOrder(refined_order, TMR.UNIFORM_POINTS) + + # make sure the forest is balanced and shared with the geomLoader object + self.fine.forest.balance(1) + self.fine.geomLoader.forest = self.fine.forest + + # copy the refinement iteration counter + self.fine.refine_iter = self.coarse.refine_iter + + # setup the fine-space model + self.setupModel(model_type="fine", **kwargs) + return + + def setOutputOfInterest(self, output_name): + """ + Sets the output name used for adaptation in both the coarse-space and + fine-space objects. + + Parameters + ---------- + output_name : str + Name of output to be used for output-based adaptation + """ + setattr(self.coarse, "output_name", output_name) + setattr(self.fine, "output_name", output_name) + return + + def setupModel(self, model_type, **kwargs): + """ + Sets up the model for the selected approximation space. After this + function is called, the underlying finite-element assembler and problem + in the model will be ready for use. + + Parameters + ---------- + model_type : str in ('coarse', 'fine') + Selector for which model to set up + + kwargs : keyword args + arg=value pairs that will directly be passed to pyApproximationSpace.createProblem() + """ + # select the appropriate model + model = self._selectModel(model_type) + + # create the assembler + model.createAssembler() + + # create and set up the problem used for analysis + if "prob_name" not in kwargs: + kwargs["prob_name"] = f"{model_type}_{model.refine_iter}" + else: + kwargs["prob_name"] += f"_{model_type}_{model.refine_iter}" + model.createProblem(**kwargs) + + # record the number of degrees of freedom for this model + nnodes = model.assembler.getNumOwnedNodes() + self.mesh_history[ + model.prob_name + ] = model.assembler.getVarsPerNode() * model.comm.allreduce(nnodes, op=MPI.SUM) + return + + def solvePrimal(self, model_type, writeSolution=False, **kwargs): + """ + Solves the primal problem for the given model, copies the state to the + model, and evaluates the output using the solved state. Optionally write + out the solution data file. 
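# A hedged per-iteration setup sketch using the methods above; the coarse
# geometry and forest are assumed to have been created already via
# adapter.initializeCoarseSpace(...), and the output of interest is set in the
# constructor through setOutputOfInterest().
adapter.setupModel(model_type="coarse")   # coarse assembler + problem, named e.g. "coarse_0"
adapter.createFineSpace()                 # duplicate the forest at order p+1, then set up the "fine" model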
+ + Parameters + ---------- + model_type : str in ('coarse', 'fine') + Selector for which model to solve the primal problem on + + writeSolution : bool + Boolean flag for writing out the state field to a data file + + kwargs : keyword args + arg=value pairs that will directly be passed to TACSProblem.solve() + """ + # select the appropriate model + model = self._selectModel(model_type) + + # solve the problem + model.problem.solve(**kwargs) + + # copy the states + model.problem.getVariables(model.state) + + # evaluate the output + model.problem.evalFunctions(self.output_history, evalFuncs=[model.output_name]) + + # write out the state field + if writeSolution: + model.writeField(model.state, "state") + return + + def solveAdjoint(self, model_type, writeSolution=False): + """ + Solves the adjoint problem for the given model and copies the adjoint to + the model. Optionally write out the solution data file. + + Parameters + ---------- + model_type : str in ('coarse', 'fine') + Selector for which model to solve the adjoint problem on + + writeSolution : bool + Boolean flag for writing out the adjoint field to a data file + """ + # select the appropriate model + model = self._selectModel(model_type) + + # get the output sensitivity w.r.t. the states + dJdu = model.assembler.createVec().getArray() + model.problem.addSVSens([model.output_name], [dJdu]) + + # solve for the adjoint variables + model.adjoint.zeroEntries() + model.problem.solveAdjoint(dJdu, model.adjoint) + + # write out the adjoint field + if writeSolution: + model.writeField(model.adjoint, "adjoint") + return + + def evaluateResidual(self, model_type, vec=None, writeSolution=False): + """ + Evaluates the residual of the specified model for a given perturbation. + Optionally write out the residual to a data file. + + Parameters + ---------- + model_type : str in ('coarse', 'fine') + Selector for which model to evaluate the residual on + + vec : TACS Vec or np.ndarray (1D, float) + Vector to add to rhs when evaluating the residual + + writeSolution : bool + Boolean flag for writing out the residual field to a data file + + Returns + ------- + res : TACS Vec + Residual vector + """ + # select the appropriate model + model = self._selectModel(model_type) + + # create the residual vector + res = model.assembler.createVec() + res.zeroEntries() + + # evaluate the residual + model.problem.getResidual(res, Fext=vec) + + # write out the residual field + if writeSolution: + model.writeField(res, "residual") + + return res + + def interpolateField(self, field_type, writeSolution=False): + """ + Uses the information in the coarse-space model to interpolate the given + field in the higher-order, fine-space model. The interpolated field + is copied to the fine-space model. Optionally write out the solution + data file. 
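# A hedged sketch of the solve step on the coarse model using the two methods
# above; any kwargs given to solvePrimal are passed straight to TACSProblem.solve().
adapter.solvePrimal(model_type="coarse", writeSolution=True)   # states + output evaluation
adapter.solveAdjoint(model_type="coarse")                      # adjoint for the selected output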
+ + Parameters + ---------- + field_type : str in ('state', 'adjoint') + Selector for which field to interpolate in the fine-space model + + writeSolution : bool + Boolean flag for writing out the selected field to a data file + """ + field_type = self._selectField(field_type) + + # interpolate the selected field + TMR.computeInterpSolution( + self.coarse.forest, + self.coarse.assembler, + self.fine.forest, + self.fine.assembler, + getattr(self.coarse, field_type), + getattr(self.fine, field_type), + ) + + # evaluate the fine-space output if the state is interpolated + if field_type == "state": + self.fine.problem.setVariables(getattr(self.fine, field_type)) + self.fine.problem.evalFunctions( + self.output_history, evalFuncs=[self.fine.output_name] + ) + + # write out the selected field + if writeSolution: + self.fine.writeField(getattr(self.fine, field_type), f"{field_type}_interp") + return + + def reconstructField(self, field_type, compute_diff=False, writeSolution=False): + """ + Uses the information in the coarse-space model to reconstruct the given + field on the higher-order fine-space model. This enriches the field + using the higher-order basis. The reconstructed field is copied to the + fine model. Optionally store the difference between the reconstructed + field and the interpolated field in the fine-space. Optionally write out + the solution data file. + + Parameters + ---------- + field_type : str in ('state', 'adjoint') + Selector for which field to reconstruct using a higher-order basis + in the fine-space model + + compute_diff : bool + When True, computes and stores the difference between the reconstructed + field and the interpolated field in the fine-space. Otherwise, just + compute and store the reconstructed field in the fine-space. + + writeSolution : bool + Boolean flag for writing out the selected field to a data file + """ + field_type = self._selectField(field_type) + + # reconstruct the select field + TMR.computeReconSolution( + self.coarse.forest, + self.coarse.assembler, + self.fine.forest, + self.fine.assembler, + getattr(self.coarse, field_type), + getattr(self.fine, field_type), + compute_diff=compute_diff, + ) + + # write out the selected field + if writeSolution: + self.fine.writeField(getattr(self.fine, field_type), f"{field_type}_recon") + return + + def estimateOutputError(self, writeSolution=False): + """ + Do the adjoint-based error estimation for the output of interest. Uses + the current state and adjoint fields in the fine-space model for the + estimation. Optionally output the nodal error field to a data file. + + NOTE: Nodal error field is a scalar and is written to the "u" variable. 
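# The two transfer operators above are typically used together when forming an
# adjoint-based error estimate. One plausible pairing (not the only one) is to
# interpolate the coarse state into the fine space and reconstruct the adjoint
# there with compute_diff=True, so that only its higher-order content remains.
adapter.interpolateField(field_type="state")                       # also evaluates the fine-space output
adapter.reconstructField(field_type="adjoint", compute_diff=True)  # enriched adjoint minus its interpolant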
+ + Parameters + ---------- + writeSolution : bool + Boolean flag for writing out node error field to a data file + + Returns + ------- + error_estimate : float + The absolute error estimate in the output of interest + + output_correction : float + The signed correction to the output of interest + + element_errors : np.ndarray (1D, float) [num_elements] + The error in the output associated with each element in the mesh + """ + # do the error estimation + ( + error_estimate, + output_correction, + element_errors, + node_errors, + ) = TMR.adjointError( + self.coarse.forest, + self.coarse.assembler, + self.fine.forest, + self.fine.assembler, + self.fine.state, + self.fine.adjoint, + ) + + # update histories + self.output_history[ + f"{self.fine.prob_name}_{self.fine.output_name}" + ] += output_correction + self.error_history[f"adapt_iter_{self.fine.refine_iter}"] = error_estimate + + # write out the nodal error field + if writeSolution: + vpn = self.fine.assembler.getVarsPerNode() + node_error_vars = np.zeros(vpn * len(node_errors)) + node_error_vars[::vpn] = node_errors + self.fine.writeField(field=node_error_vars, field_name="error") + + return error_estimate, output_correction, element_errors, node_errors + + def collectErrors(self, element_errors=None, node_errors=None): + """ + Collects errors that are distributed across all the processors + + Parameters + ---------- + element_errors : np.ndarray (1D, float) + Distributed array of element errors + + node_errors : np.ndarray (1D, float) + Distributed array of node errors + + Returns + ------- + total_errors : np.ndarray (1D, float) or list[np.ndarray (1D, float)] + Collected errors from all processors. If both element and nodal + errors are collected, element errors are the first element in the + returned list. + """ + total_errors = [] + if element_errors is not None: + # collect the error localized to the distributed elements + elem_counts = self.comm.allgather(len(element_errors)) + element_errors_tot = np.empty(sum(elem_counts)) + self.comm.Allgatherv(element_errors, [element_errors_tot, elem_counts]) + total_errors.append(element_errors_tot) + if node_errors is not None: + # collect the error distributed to the nodes + node_counts = self.comm.allgather(len(node_errors)) + node_errors_tot = np.empty(sum(node_counts)) + self.comm.Allgatherv(node_errors, [node_errors_tot, node_counts]) + total_errors.append(node_errors_tot) + if len(total_errors) == 1: + total_errors = total_errors[0] + return total_errors + + def refineModel(self, element_errors=None): + """ + Applies mesh refinement to the model using the selected adaptation + strategy and a distribution of element-wise errors. If adaptation is not + being done, uniform refinement is applied + + Parameters + ---------- + element_errors : np.ndarray (1D, float) [num_elements] + The error in the output associated with each element in the mesh + """ + if self.adapt_strategy == "decreasing_threshold": + self.adaptModelDecreasingThreshold(element_errors) + elif self.adapt_strategy == "fixed_growth": + self.adaptModelFixedGrowth(element_errors) + else: + # apply uniform refinement to the coarse model + self.coarse.applyRefinement() + return + + def adaptModelDecreasingThreshold(self, element_errors): + """ + Refines elements in the coarse-space model based on a decreasing threshold + adaptation strategy. + + Nemec, Marian, Michael Aftosmis, and Mathias Wintzer. "Adjoint-based + adaptive mesh refinement for complex geometries." 46th AIAA Aerospace + Sciences Meeting and Exhibit. 2008. 
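# A hedged sketch of closing out one adaptation cycle with the methods above:
# estimate and localize the output error, then either stop or adapt the coarse
# mesh. error_tol here is illustrative; refineModel expects the distributed
# per-processor element errors, while collectErrors is for post-processing.
err_est, correction, elem_errors, node_errors = adapter.estimateOutputError(writeSolution=True)
all_elem_errors = adapter.collectErrors(element_errors=elem_errors)
if err_est > error_tol:
    adapter.refineModel(elem_errors)   # falls back to uniform refinement if no strategy was set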
+ + Parameters + ---------- + element_errors : np.ndarray (1D, float) [num_elements] + The error in the output associated with each element in the mesh + """ + # get the total number of elements + nelems = len(element_errors) + elem_counts = self.comm.allgather(nelems) + nelems_tot = sum(elem_counts) + + # choose elements based on an equidistributed target error + target_error = self.error_tol / nelems_tot + error_ratio = element_errors / target_error + refine_threshold = max( + 1.0, 2.0 ** (self.num_decrease_iters - self.coarse.refine_iter) + ) + + # record the refinement threshold:error_ratio pair for this iteration + error_ratio_tot = np.zeros(nelems_tot) + self.comm.Allgatherv(error_ratio, [error_ratio_tot, elem_counts]) + self.adaptation_history["threshold"][ + f"refine_{self.coarse.refine_iter}" + ] = refine_threshold + self.adaptation_history["element_errors"][ + f"adapt_iter_{self.coarse.refine_iter}" + ] = error_ratio_tot + + # get the refinement indicator array + adapt_indicator = np.array(error_ratio >= refine_threshold, dtype=int) + nref = np.count_nonzero(adapt_indicator) + nref = self.comm.allreduce(nref, op=MPI.SUM) + + # adapt the coarse-space model + self.coarse.applyRefinement( + adapt_indicator.astype(np.intc), + num_min_levels=self.num_min_ref_levels, + num_max_levels=self.num_max_ref_levels, + ) + return + + def adaptModelFixedGrowth(self, element_errors): + """ + Refines elements in the coarse-space model based on a fixed growth + adaptation strategy. + + Parameters + ---------- + element_errors : np.ndarray (1D, float) [num_elements] + The error in the output associated with each element in the mesh + """ + # get the elem counts + nelems = len(element_errors) + elem_counts = self.comm.allgather(nelems) + nelems_tot = sum(elem_counts) + nrefine = int(self.growth_refine_factor * nelems_tot) + ncoarse = int(self.growth_coarsen_factor * nelems_tot) + + # gather all the element errors on each proc + element_errors_tot = np.empty(nelems_tot) + self.comm.Allgatherv(element_errors, [element_errors_tot, elem_counts]) + + # compute the error ratio for all elements + target_error = self.error_tol / nelems_tot + error_ratio_tot = element_errors_tot / target_error + + # sort all the element error ratios in descending order + sorted_inds = np.flip(np.argsort(error_ratio_tot)) + + # create the adaptation indicator array + adapt_indicator_tot = np.zeros(nelems_tot, dtype=int) + refine_inds = None + coarse_inds = None + if nrefine > 0: + refine_inds = sorted_inds[:nrefine] + adapt_indicator_tot[refine_inds] = np.where( + error_ratio_tot[refine_inds] >= 1.0, 1, adapt_indicator_tot[refine_inds] + ) + if ncoarse > 0: + coarse_inds = sorted_inds[-ncoarse:] + adapt_indicator_tot[coarse_inds] = np.where( + error_ratio_tot[coarse_inds] < 1.0, -1, adapt_indicator_tot[coarse_inds] + ) + + # update the adaptation history + if refine_inds is not None: + ind = 0 + while error_ratio_tot[refine_inds[ind]] >= 1.0: + self.adaptation_history["threshold"][ + f"refine_{self.coarse.refine_iter}" + ] = error_ratio_tot[refine_inds[ind]] + ind += 1 + if ind >= nrefine: + break + if coarse_inds is not None: + ind = ncoarse - 1 + while error_ratio_tot[coarse_inds[ind]] < 1.0: + self.adaptation_history["threshold"][ + f"coarsen_{self.coarse.refine_iter}" + ] = error_ratio_tot[coarse_inds[ind]] + ind -= 1 + if ind <= 0: + break + self.adaptation_history["element_errors"][ + f"adapt_iter_{self.coarse.refine_iter}" + ] = error_ratio_tot + + # scatter the adaptation indicator to each proc + adapt_indicator = 
np.empty(nelems, dtype=int) + self.comm.Scatterv([adapt_indicator_tot, elem_counts], adapt_indicator, root=0) + + # adapt the coarse-space model + self.coarse.applyRefinement( + adapt_indicator.astype(np.intc), + num_min_levels=self.num_min_ref_levels, + num_max_levels=self.num_max_ref_levels, + ) + return + + def writeModelHistory(self, filename=""): + """ + Writes out the model history information to an .hdf5 file + + Parameters + ---------- + filename : str + name of output file, should include the proper .hdf5 file extension + """ + if self.comm.rank == 0: + if not filename: + filename = "model_history.hdf5" + with h5py.File(filename, "w") as h5: + # store the mesh histories + h5["mesh_history/coarse"] = np.array( + [ + self.mesh_history[key] + for key in self.mesh_history.keys() + if "coarse" in key + ] + ) + h5["mesh_history/fine"] = np.array( + [ + self.mesh_history[key] + for key in self.mesh_history.keys() + if "fine" in key + ] + ) + + # store the output histories + h5["output_history/coarse"] = np.array( + [ + self.output_history[key] + for key in self.output_history.keys() + if "coarse" in key + ] + ) + h5["output_history/fine"] = np.array( + [ + self.output_history[key] + for key in self.output_history.keys() + if "fine" in key + ] + ) + h5["output_history"].attrs["output_name"] = self.adapt_output.replace( + "_", " " + ).upper() + + # store the error estimate history + h5["error_estimate_history"] = np.array( + list(self.error_history.values()) + ) + + # store the adaptation refinement histories + h5["adaptation_history/threshold/refine"] = np.array( + [ + self.adaptation_history["threshold"][key] + for key in self.adaptation_history["threshold"].keys() + if "refine" in key + ] + ) + h5["adaptation_history/threshold/coarsen"] = np.array( + [ + self.adaptation_history["threshold"][key] + for key in self.adaptation_history["threshold"].keys() + if "coarsen" in key + ] + ) + for key, val in self.adaptation_history["element_errors"].items(): + h5[f"adaptation_history/element_errors/{key}"] = val + if self.adapt_strategy: + h5["adaptation_history"].attrs[ + "strategy" + ] = self.adapt_strategy.replace("_", " ").lower() + h5["adaptation_history"].attrs["error_tolerance"] = self.error_tol + h5["adaptation_history"].attrs[ + "max_refine_levels" + ] = self.num_max_ref_levels + h5["adaptation_history"].attrs[ + "min_refine_levels" + ] = self.num_min_ref_levels + if hasattr(self, "num_decrease_iters"): + h5["adaptation_history"].attrs[ + "threshold_decrease_iters" + ] = self.num_decrease_iters + if hasattr(self, "growth_refine_factor"): + h5["adaptation_history"].attrs[ + "growth_refine_factor" + ] = self.growth_refine_factor + if hasattr(self, "growth_coarsen_factor"): + h5["adaptation_history"].attrs[ + "growth_coarsen_factor" + ] = self.growth_coarsen_factor + return + + def _selectModel(self, model_type): + """ + Helper function to check and select the appropriate model option + """ + valid_model_types = ("coarse", "fine") + model_type = model_type.lower() + assert ( + model_type in valid_model_types + ), f"model_type must be one of {valid_model_types}" + return getattr(self, model_type) + + def _selectField(self, field_type): + """ + Helper function to check and select the appropriate field option + """ + valid_field_types = ("state", "adjoint") + field_type = field_type.lower() + assert ( + field_type in valid_field_types + ), f"field_type must be one of {valid_field_types}" + return field_type + + def printroot(self, msg): + """ + Helper function to print a message on the root 
proc + """ + if self.comm.rank == 0: + print(f"{msg}") + return + + def printproc(self, msg): + """ + Helper function to print a message on all procs + """ + print(f"[{self.comm.rank}] {msg}") + return
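# A brief hedged post-processing example using the helpers above, run after the
# adaptive loop has finished (the default history file name is shown).
adapter.writeModelHistory(filename="model_history.hdf5")
adapter.printroot(f"error-estimate history: {adapter.error_history}")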