diff --git a/.gitignore b/.gitignore
index 6fdd2ea..fd586d3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -49,6 +49,7 @@ tmr/*.cpp
 *.stp
 *.step
 *.egads
+*.hdf5
 
 # vscode config files
 .vscode/*
diff --git a/examples/pytacsadapt/plate/make_plate_geom.py b/examples/pytacsadapt/plate/make_plate_geom.py
index 17d6ec9..1d7a7c7 100644
--- a/examples/pytacsadapt/plate/make_plate_geom.py
+++ b/examples/pytacsadapt/plate/make_plate_geom.py
@@ -45,6 +45,7 @@ def makePlateGeom(width=1.0, height=1.0, npanels=1, makeIGES=False):
     coords = np.zeros([nverts, 3])
     coords[:, 0] = X.ravel()
     coords[:, 1] = Y.ravel()
+    coords *= 1000.0  # egads defaults to use mm as units, not m
     # print(coords)
 
     # make the quad node connectivity - used to make the edge and face connectivity
@@ -159,7 +160,7 @@ def makePlateGeom(width=1.0, height=1.0, npanels=1, makeIGES=False):
         fname += ".iges"
     else:
         fname += ".step"
-    model.saveModel(fname, overwrite=True)
+    model.saveModel(fname, units="M", overwrite=True)
     return
 
 
diff --git a/examples/pytacsadapt/plate/plate_adapt.py b/examples/pytacsadapt/plate/plate_adapt.py
index 2892e1e..2747842 100644
--- a/examples/pytacsadapt/plate/plate_adapt.py
+++ b/examples/pytacsadapt/plate/plate_adapt.py
@@ -1,7 +1,7 @@
 """
 A simple example to test adaptive analysis with TACS/TMR.
 
-This example uses a square flat plate, fully fixed on all edges, with a uniform
+This example uses a square flat plate, fully fixed on two edges, with a uniform
 pressure load. Static analysis is performed, followed by either uniform or
 adaptive refinement. Adaptive refinement is based on a selected output of
 interest. Various options for this example can be selected through command line
@@ -9,11 +9,10 @@
 
 To run a simple adaptive analysis on the KS failure output, do the following:
 1. run `python make_plate_geom.py` to generate the square plate geometry file
-2. run `mpirun -np 4 python plate_adapt.py --niters 8 --adapt`
-3. run `mpirun -np 4 python plate_adapt.py --niters 5`
-4. compare the results of adaptive and uniform refinement between step 2 and 3
+2. run `mpirun -np 4 python plate_adapt.py --niters 4`
+3. run `mpirun -np 4 python plate_adapt.py --niters 10 --strategy fixed_growth --ref_factor 0.1`
+4. compare the results of uniform and adaptive refinement between step 2 and 3
 """
-import sys
 import os
 import argparse
 from mpi4py import MPI
@@ -114,28 +113,25 @@ def initCallBack(coarse_space, **kwargs):
 
     # load the geometry for the coarse model
    coarse_space.geomLoader = pyGeometryLoader(coarse_space.comm, geom_type)
-    coarse_space.geomLoader.readGeometryFile(geom_file)
+    coarse_space.geomLoader.readGeometryFile(geom_file=geom_file, print_lev=0)
 
-    # name the face and edges
-    face_names = {0: "panel"}
-    edge_names = {0: "south", 1: "east", 2: "north", 3: "west"}
+    # name the geometry
+    edge_names = {0: "bottom", 1: "right", 2: "top", 3: "left"}
+    face_names = {0: "plate"}
     coarse_space.geomLoader.nameGeometricEntities(
         face_names=face_names, edge_names=edge_names
     )
 
     # specify boundary conditions - use defaults for nums and vals to fully fix
-    bc_names = ["south", "east", "north", "west"]
-    bc_nums = {}
-    bc_info = {"type": "edge", "bc_names": bc_names, "bc_nums": bc_nums}
+    bc_names = ["left", "right"]
+    bc_info = {"type": "edge", "bc_names": bc_names}
     coarse_space.geomLoader.setBoundaryConditions(bc_info)
 
     # set the meshing options
-    coarse_space.geomLoader.setMeshOptions(write_mesh_quality_histogram=0)
-    if not args.adapt:
-        coarse_space.geomLoader.setMeshOptions(mesh_type_default=TMR.STRUCTURED)
+    coarse_space.geomLoader.setMeshOptions(write_mesh_quality_histogram=False)
 
     # create the initial coarse mesh
-    coarse_space.geomLoader.createMesh(h=args.hinit)
+    coarse_space.geomLoader.createMesh(h=args.hinit, writeBDF=False)
 
     # create the topology
     coarse_space.geomLoader.createTopology(useMesh=True)
@@ -168,26 +164,26 @@ def printroot(msg):
 
 # parse input arguments
 parser = argparse.ArgumentParser(
-    description="Mesh refinement test case for a fully-clamped flat plate"
+    description="Mesh refinement test case for a plate"
 )
 parser.add_argument(
     "--hinit",
-    default=0.25,
+    default=0.125,
     type=float,
     help="target mesh size used for initial mesh generation",
 )
 parser.add_argument(
-    "--order", default=3, type=int, help="order of elements in the mesh"
+    "--p", default=-3.0e4, type=float, help="uniform pressure load to apply"
 )
 parser.add_argument(
-    "--p", default=-1.0e2, type=float, help="uniform pressure load to apply"
+    "--order", default=3, type=int, help="order of elements in the mesh"
 )
 parser.add_argument(
     "--output", default="ks_fail", type=str, help="name of output of interest"
 )
 parser.add_argument(
     "--ksweight",
-    default=1.0e6,
+    default=1.0e2,
     type=float,
     help="weight factor (rho) used for KS-aggregated outputs",
 )
@@ -198,28 +194,28 @@ def printroot(msg):
     help="number of refinement iterations to perform",
 )
 parser.add_argument(
-    "--adapt",
-    default=False,
-    action="store_true",
-    help="boolean flag to apply adaptive/uniform mesh refinement",
+    "--strategy",
+    default="",
+    type=str,
+    help="type of adaptation strategy to use (leave empty for uniform refinement)",
 )
 parser.add_argument(
     "--err_tol",
-    default=1.0e-6,
+    default=1.0e-3,
     type=float,
     help="target output error criterion used for adaptation termination",
 )
-parser.add_argument(
-    "--nmaxlevs",
-    default=30,
-    type=int,
-    help="max number of refinement levels allowed during adaptation",
-)
 parser.add_argument(
     "--ndecrease",
     default=5,
     type=int,
-    help="number of iterations needed to decrease the refinement threshold to 1",
+    help="number of iterations needed to decrease the refinement threshold to 0",
 )
+parser.add_argument(
+    "--ref_factor",
+    default=0.0,
+    type=float,
+    help="fixed-growth refinement fraction applied each adaptive iteration",
+)
 args = parser.parse_args()
 for key in vars(args):
@@ -229,21 +225,24 @@ def printroot(msg):
 if comm.rank == 0:
     for item in os.listdir(os.getcwd()):
         fname, fext = os.path.splitext(item)
-        if fext in [".f5", ".plt"]:
+        if fext in [".hdf5", ".f5", ".plt"]:
             os.remove(os.path.join(os.getcwd(), item))
 
 # create the adaptive model
+adapt_params = {
+    "adapt_strategy": args.strategy,
+    "error_tol": args.err_tol,
+    "num_decrease_iters": args.ndecrease,
+    "growth_refine_factor": args.ref_factor,
+}
 model = pyTACSAdapt(
     comm,
     initCallBack,
     elemCallBack,
     probCallBack,
     args.output.lower(),
-    error_tol=args.err_tol,
-    num_max_ref_levels=args.nmaxlevs,
-    num_decrease_iters=args.ndecrease,
+    adapt_params,
 )
-model.setOutputOfInterest(args.output.lower())
 
 # initialize the coarse-space model
 model.initializeCoarseSpace(
@@ -257,7 +256,7 @@ def printroot(msg):
     # set up the coarse-space model
     model.setupModel(model_type="coarse", cmd_ln_args=args)
     model.coarse.setAuxElements(
-        geom_labels=["panel"], aux_type="pressure", faceIndex=0, p=args.p
+        geom_labels=["plate"], aux_type="pressure", faceIndex=0, p=args.p
     )
     printroot("coarse model set up")
 
@@ -265,7 +264,7 @@ def printroot(msg):
     model.solvePrimal(model_type="coarse", writeSolution=True)
     printroot("coarse primal problem solved")
 
-    if args.adapt:
+    if args.strategy:
         # solve the coarse adjoint problem
         model.solveAdjoint(model_type="coarse", writeSolution=False)
         printroot("coarse adjoint problem solved")
@@ -273,7 +272,7 @@ def printroot(msg):
         # set up the fine-space model
         model.createFineSpace(cmd_ln_args=args)
         model.fine.setAuxElements(
-            geom_labels=["panel"], aux_type="pressure", faceIndex=0, p=args.p
+            geom_labels=["plate"], aux_type="pressure", faceIndex=0, p=args.p
         )
         printroot("fine model set up")
 
@@ -293,6 +292,7 @@ def printroot(msg):
             error_estimate,
             output_correction,
             element_errors,
+            node_errors,
         ) = model.estimateOutputError()
         printroot("output error estimated")
 
@@ -303,16 +303,19 @@ def printroot(msg):
         elif k < (args.niters):
             # do the adaptation if not yet satisfied
             printroot("applying adaptive refinement")
-            model.adaptModelDecreasingThreshold(element_errors)
+            model.refineModel(element_errors)
 
     # otherwise do uniform mesh refinement
     else:
         if k < (args.niters):
            printroot("applying uniform refinement")
-            model.coarse.applyRefinement()
+            model.refineModel()
 
 # print out final information
 printroot(f"mesh_history: {model.mesh_history}")
 printroot(f"output_history: {model.output_history}")
 printroot(f"error_history: {model.error_history}")
 printroot(f"adaptation_history: {model.adaptation_history}")
+
+# write the model history file
+model.writeModelHistory()
diff --git a/src/TMRQuadForest.cpp b/src/TMRQuadForest.cpp
index 483fe62..a943200 100644
--- a/src/TMRQuadForest.cpp
+++ b/src/TMRQuadForest.cpp
@@ -4017,8 +4017,8 @@ void TMRQuadForest::getNodeConn(const int **_conn, int *_num_elements,
   if (_num_owned_nodes) {
     *_num_owned_nodes = num_owned_nodes;
   }
-  if (_num_owned_nodes) {
-    *_num_owned_nodes = num_owned_nodes;
+  if (_num_local_nodes) {
+    *_num_local_nodes = num_local_nodes;
   }
 }
 
diff --git a/src/TMR_RefinementTools.cpp b/src/TMR_RefinementTools.cpp
index 7a530e8..1089dcb 100644
--- a/src/TMR_RefinementTools.cpp
+++ b/src/TMR_RefinementTools.cpp
@@ -3016,8 +3016,11 @@ double TMR_AdjointErrorEst(TMRQuadForest *forest, TACSAssembler *tacs,
         // skip dependent nodes - handled in beginSetValues() with dep weights
         continue;
       }
-      elem_error[elem] += fabs(TacsRealPart(err[i])) / weights[i];
+      elem_error[elem] += TacsRealPart(err[i]) / weights[i];
     }
+    // absolute value after the sum allows for error cancellation between each
+    // basis function within an element
+    elem_error[elem] = fabs(elem_error[elem]);
   }
 
   // Add up the absolute value of the nodal errors to get the output
@@ -3025,8 +3028,8 @@ double TMR_AdjointErrorEst(TMRQuadForest *forest, TACSAssembler *tacs,
   TacsScalar *nerr;
   int size = nodal_error->getArray(&nerr);
   memcpy(node_error, nerr, size * sizeof(double));
-  for (int i = 0; i < size; i++) {
-    total_error_est += fabs(TacsRealPart(nerr[i]));
+  for (int i = 0; i < nelems; i++) {
+    total_error_est += elem_error[i];
   }
 
   // Sum up the contributions across all processors
diff --git a/tmr/pytacsadapt.py b/tmr/pytacsadapt.py
index f2dd5a7..48e67d0 100644
--- a/tmr/pytacsadapt.py
+++ b/tmr/pytacsadapt.py
@@ -327,8 +327,8 @@ def __init__(
 
         # initialize the storage for saving model info
         self.mesh_history = {}  # mesh size (ndof)
-        self.output_history = {}  # outputs/corrected outputs
-        self.error_history = {}  # error estimates
+        self.output_history = {}  # outputs
+        self.error_history = {}  # error estimates and output corrections
         self.adaptation_history = {
             "threshold": {},  # refine/coarsen thresholds
             "element_errors": {},
@@ -661,10 +661,10 @@ def estimateOutputError(self, writeSolution=False):
         )
 
         # update histories
-        self.output_history[
-            f"{self.fine.prob_name}_{self.fine.output_name}"
-        ] += output_correction
-        self.error_history[f"adapt_iter_{self.fine.refine_iter}"] = error_estimate
+        self.error_history[f"adapt_iter_{self.fine.refine_iter}_error"] = error_estimate
+        self.error_history[
+            f"adapt_iter_{self.fine.refine_iter}_correction"
+        ] = output_correction
 
         # write out the nodal error field
         if writeSolution:
@@ -908,7 +908,20 @@ def writeModelHistory(self, filename=""):
 
             # store the error estimate history
             h5["error_estimate_history"] = np.array(
-                list(self.error_history.values())
+                [
+                    self.error_history[key]
+                    for key in self.error_history.keys()
+                    if "error" in key
+                ]
+            )
+
+            # store the output correction history
+            h5["output_correction_history"] = np.array(
+                [
+                    self.error_history[key]
+                    for key in self.error_history.keys()
+                    if "correction" in key
+                ]
             )
 
             # store the adaptation refinement histories