From 1325b5efd93ccebc1bacd62a910d825b9feba19f Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sun, 24 Mar 2024 21:55:29 -0400
Subject: [PATCH] sync gen2.5 architecture training, except weights and biases (which will be updated after training)

---
 neuralfoil/gen2_5_architecture/__init__.py          |   0
 .../gen2_5_architecture/_basic_data_type.py         | 435 ++++++++++++++++++
 neuralfoil/gen2_5_architecture/main.py              | 358 ++++++++++++++
 .../scaled_input_distribution.npz                   | Bin 0 -> 7696 bytes
 4 files changed, 793 insertions(+)
 create mode 100644 neuralfoil/gen2_5_architecture/__init__.py
 create mode 100644 neuralfoil/gen2_5_architecture/_basic_data_type.py
 create mode 100644 neuralfoil/gen2_5_architecture/main.py
 create mode 100644 neuralfoil/gen2_5_architecture/nn_weights_and_biases/scaled_input_distribution.npz

diff --git a/neuralfoil/gen2_5_architecture/__init__.py b/neuralfoil/gen2_5_architecture/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/neuralfoil/gen2_5_architecture/_basic_data_type.py b/neuralfoil/gen2_5_architecture/_basic_data_type.py
new file mode 100644
index 0000000..c883f95
--- /dev/null
+++ b/neuralfoil/gen2_5_architecture/_basic_data_type.py
@@ -0,0 +1,435 @@
+import warnings
+import aerosandbox as asb
+import aerosandbox.numpy as np
+from dataclasses import dataclass, field
+from typing import Union, Sequence, List, Any, Dict
+from scipy import interpolate
+
+
+def compute_optimal_x_points(
+        n_points,
+):
+    # An earlier draft spaced the points per thin airfoil theory:
+    #     theta = np.arccos(1 - 2 * x)
+    #     delta_Cp_thin_airfoil_theory = 4 / np.tan(theta)
+    #     desired_spacing = 1 / delta_Cp_thin_airfoil_theory
+    #     spacing_function = integral(desired_spacing, x)  # rescaled to (0, 1)
+    # blended 50/50 with uniform spacing. The current implementation simply
+    # returns the midpoints of n_points uniform bins on (0, 1).
+    s = np.linspace(0, 1, n_points + 1)
+    return (s[1:] + s[:-1]) / 2
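+
+# A sketch of the behavior (illustrative only, not part of the module API):
+# for n_points=4 this returns [0.125, 0.375, 0.625, 0.875], the midpoints of
+# four uniform bins on (0, 1), so no sample lands exactly on the leading or
+# trailing edge.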
+ "H" : np.nan * np.ones_like(Data.bl_x_points), + "ue/vinf": np.nan * np.ones_like(Data.bl_x_points), + } + ) # theta, H, ue/vinf + + @property + def inputs(self): + return { + "airfoil" : self.airfoil, + "alpha" : self.alpha, + "Re" : self.Re, + "mach" : self.mach, + "n_crit" : self.n_crit, + "xtr_upper": self.xtr_upper, + "xtr_lower": self.xtr_lower, + } + + @property + def outputs(self): + return { + "analysis_confidence": self.analysis_confidence, + "af_outputs" : self.af_outputs, + "upper_bl_outputs" : self.upper_bl_outputs, + "lower_bl_outputs" : self.lower_bl_outputs, + } + + @classmethod + def from_xfoil(cls, + airfoil: Union[asb.Airfoil, asb.KulfanAirfoil], + alphas: Union[float, Sequence[float]], + Re: float, + mach: float, + n_crit: float, + xtr_upper: float, + xtr_lower: float, + timeout=5, + max_iter=100, + xfoil_command: str = 'xfoil' + ) -> List["Data"]: + airfoil = airfoil.normalize().to_kulfan_airfoil() + + alphas = np.atleast_1d(alphas) + + xf = asb.XFoil( + airfoil=airfoil, + Re=Re, + mach=mach, + n_crit=n_crit, + xtr_upper=xtr_upper, + xtr_lower=xtr_lower, + xfoil_repanel=True, + timeout=timeout, + max_iter=max_iter, + xfoil_command=xfoil_command, + include_bl_data=True, + ) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=UserWarning) + xf_outputs = xf.alpha(alphas) + + training_datas = [] + + def append_empty_data(): + training_datas.append( + cls( + airfoil=airfoil.to_kulfan_airfoil(), + alpha=alpha, + Re=Re, + mach=mach, + n_crit=n_crit, + xtr_upper=xtr_upper, + xtr_lower=xtr_lower, + analysis_confidence=0, + ) + ) + + for i, alpha in enumerate(alphas): + ### Figure out which output corresponds to this alpha + alpha_deviations = np.abs(xf_outputs["alpha"] - alpha) + + if ( # If the alpha is not in the output + (len(alpha_deviations) == 0) or + (np.min(alpha_deviations) > 0.001) + ): + append_empty_data() + continue + + index = np.argmin(alpha_deviations) + xf_output = { + key: value[index] for key, value in xf_outputs.items() + } + bl_data = xf_output["bl_data"] + + # ### Trim off the wake data (rows where "Cp" first becomes NaN) + # wake_node_indices = np.flatnonzero(bl_data["x"] > ) + # bl_data = bl_data.iloc[ + # :np.flatnonzero(bl_data["x"] <= airfoil.x().max())[-1] + 1, + # : + # ] + + ### Split the boundary layer data into upper and lower sections + # LE_index = bl_data["x"].argmin() + try: + dx = np.diff(bl_data["x"].values) + upper_bl_data = bl_data.iloc[ + :np.flatnonzero(dx < 0)[-1] + 2 + : + ].iloc[::-1, :] + lower_bl_data = bl_data.iloc[ + np.flatnonzero(dx > 0)[0]:, + : + ] + except IndexError as e: # If the boundary layer data is too short + print(e) + append_empty_data() + continue + + if len(upper_bl_data) <= 4 or len(lower_bl_data) <= 4: # If the boundary layer data is too short + print("BL data too short") + append_empty_data() + continue + + interp = lambda x, y: interpolate.PchipInterpolator(x, y, extrapolate=True)(cls.bl_x_points) + + try: + training_datas.append( + cls( + airfoil=airfoil.to_kulfan_airfoil(), + alpha=alpha, + Re=Re, + mach=mach, + n_crit=n_crit, + xtr_upper=xtr_upper, + xtr_lower=xtr_lower, + analysis_confidence=1, + af_outputs={ + "CL" : xf_output["CL"], + "CD" : xf_output["CD"], + "CM" : xf_output["CM"], + "Top_Xtr": xf_output["Top_Xtr"], + "Bot_Xtr": xf_output["Bot_Xtr"], + }, + upper_bl_outputs={ + "theta" : interp(upper_bl_data["x"], upper_bl_data["theta"]), + "H" : interp(upper_bl_data["x"], upper_bl_data["H"]), + "ue/vinf": interp(upper_bl_data["x"], upper_bl_data["ue/vinf"]), + }, + 
+            interp = lambda x, y: interpolate.PchipInterpolator(x, y, extrapolate=True)(cls.bl_x_points)
+
+            try:
+                training_datas.append(
+                    cls(
+                        airfoil=airfoil.to_kulfan_airfoil(),
+                        alpha=alpha,
+                        Re=Re,
+                        mach=mach,
+                        n_crit=n_crit,
+                        xtr_upper=xtr_upper,
+                        xtr_lower=xtr_lower,
+                        analysis_confidence=1,
+                        af_outputs={
+                            "CL"     : xf_output["CL"],
+                            "CD"     : xf_output["CD"],
+                            "CM"     : xf_output["CM"],
+                            "Top_Xtr": xf_output["Top_Xtr"],
+                            "Bot_Xtr": xf_output["Bot_Xtr"],
+                        },
+                        upper_bl_outputs={
+                            "theta"  : interp(upper_bl_data["x"], upper_bl_data["theta"]),
+                            "H"      : interp(upper_bl_data["x"], upper_bl_data["H"]),
+                            "ue/vinf": interp(upper_bl_data["x"], upper_bl_data["ue/vinf"]),
+                        },
+                        lower_bl_outputs={
+                            "theta"  : interp(lower_bl_data["x"], lower_bl_data["theta"]),
+                            "H"      : interp(lower_bl_data["x"], lower_bl_data["H"]),
+                            "ue/vinf": interp(lower_bl_data["x"], lower_bl_data["ue/vinf"]),
+                        },
+                    )
+                )
+            except ValueError as e:
+                print(e)
+                append_empty_data()
+                continue
+
+        return training_datas
+
+    def to_vector(self) -> np.ndarray:  # dtype: float32
+        items = [
+            self.airfoil.upper_weights,
+            self.airfoil.lower_weights,
+            self.airfoil.leading_edge_weight,
+            self.airfoil.TE_thickness,
+            self.alpha,
+            self.Re,
+            self.mach,
+            self.n_crit,
+            self.xtr_upper,
+            self.xtr_lower,
+            self.analysis_confidence,
+            self.af_outputs["CL"],
+            self.af_outputs["CD"],
+            self.af_outputs["CM"],
+            self.af_outputs["Top_Xtr"],
+            self.af_outputs["Bot_Xtr"],
+            self.upper_bl_outputs["theta"],
+            self.upper_bl_outputs["H"],
+            self.upper_bl_outputs["ue/vinf"],
+            self.lower_bl_outputs["theta"],
+            self.lower_bl_outputs["H"],
+            self.lower_bl_outputs["ue/vinf"],
+        ]
+
+        return np.concatenate([
+            np.atleast_1d(item).flatten().astype(np.float32)
+            for item in items
+        ], axis=0)
+
+    @classmethod
+    def from_vector(cls, vector: np.ndarray) -> "Data":
+        i = 0
+
+        def pop(n):
+            # Consumes the next `n` entries of the vector.
+            nonlocal i
+            result = vector[i:i + n]
+            i += n
+            return result
+
+        airfoil = asb.KulfanAirfoil(
+            upper_weights=pop(8),
+            lower_weights=pop(8),
+            leading_edge_weight=pop(1),
+            TE_thickness=pop(1),
+        )
+        alpha = pop(1)
+        Re = pop(1)
+        mach = pop(1)
+        n_crit = pop(1)
+        xtr_upper = pop(1)
+        xtr_lower = pop(1)
+        analysis_confidence = pop(1)
+        af_outputs = {
+            "CL"     : pop(1),
+            "CD"     : pop(1),
+            "CM"     : pop(1),
+            "Top_Xtr": pop(1),
+            "Bot_Xtr": pop(1),
+        }
+        upper_bl_outputs = {
+            "theta"  : pop(cls.N),
+            "H"      : pop(cls.N),
+            "ue/vinf": pop(cls.N),
+        }
+        lower_bl_outputs = {
+            "theta"  : pop(cls.N),
+            "H"      : pop(cls.N),
+            "ue/vinf": pop(cls.N),
+        }
+        if i != len(vector):
+            raise ValueError(f"Vector length mismatch: got {len(vector)}, expected {i}.")
+
+        return cls(
+            airfoil=airfoil,
+            alpha=alpha,
+            Re=Re,
+            mach=mach,
+            n_crit=n_crit,
+            xtr_upper=xtr_upper,
+            xtr_lower=xtr_lower,
+            analysis_confidence=analysis_confidence,
+            af_outputs=af_outputs,
+            upper_bl_outputs=upper_bl_outputs,
+            lower_bl_outputs=lower_bl_outputs,
+        )
+
+    @classmethod
+    def get_vector_input_column_names(cls):
+        return [
+            *[f"kulfan_upper_{i}" for i in range(8)],
+            *[f"kulfan_lower_{i}" for i in range(8)],
+            "kulfan_LE_weight",
+            "kulfan_TE_thickness",
+            "alpha",
+            "Re",
+            "mach",
+            "n_crit",
+            "xtr_upper",
+            "xtr_lower",
+        ]
+
+    @classmethod
+    def get_vector_output_column_names(cls):
+        return [
+            "analysis_confidence",
+            "CL",
+            "CD",
+            "CM",
+            "Top_Xtr",
+            "Bot_Xtr",
+            *[f"upper_bl_theta_{i}" for i in range(cls.N)],
+            *[f"upper_bl_H_{i}" for i in range(cls.N)],
+            *[f"upper_bl_ue/vinf_{i}" for i in range(cls.N)],
+            *[f"lower_bl_theta_{i}" for i in range(cls.N)],
+            *[f"lower_bl_H_{i}" for i in range(cls.N)],
+            *[f"lower_bl_ue/vinf_{i}" for i in range(cls.N)],
+        ]
+
+    @classmethod
+    def get_vector_column_names(cls):
+        return cls.get_vector_input_column_names() + cls.get_vector_output_column_names()
+
+    def __eq__(self, other: "Data") -> bool:
+        # Two Data objects are equal if every vector entry matches to within
+        # float32 roundoff, or if both entries are NaN.
+        v1 = self.to_vector()
+        v2 = other.to_vector()
+
+        return all([
+            np.allclose(
+                s1, s2,
+                atol=0,
+                rtol=np.finfo(np.float32).eps * 10,
+            ) or np.all(np.isnan([s1, s2]))
+            for s1, s2 in zip(v1, v2)
+        ])
+
+    def validate_vector_format(self):
+        assert self == Data.from_vector(self.to_vector())
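+
+# Layout note (derived from the accessors above): a serialized Data vector has
+# 24 input entries (8 + 8 Kulfan weights, LE weight, TE thickness, alpha, Re,
+# mach, n_crit, xtr_upper, xtr_lower) followed by 6 + 6 * N = 198 output
+# entries (analysis_confidence, CL, CD, CM, Top_Xtr, Bot_Xtr, plus six
+# length-N boundary-layer distributions), i.e. 222 floats total for N = 32.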
+
+
+if __name__ == '__main__':
+    af = asb.Airfoil("ag41d-02r").normalize().to_kulfan_airfoil()
+
+    datas = Data.from_xfoil(
+        airfoil=af,
+        alphas=[3, 5, 60],
+        Re=1e6,
+        mach=0,
+        n_crit=9,
+        xtr_upper=0.99,
+        xtr_lower=0.99,
+    )
+
+    d = datas[0]
+
+    # import aerosandbox as asb
+    # import aerosandbox.numpy as np
+    #
+    # airfoil_database_path = asb._asb_root / "geometry" / "airfoil" / "airfoil_database"
+    #
+    # UIUC_airfoils = [
+    #     asb.Airfoil(name=filename.stem).normalize().to_kulfan_airfoil()
+    #     for filename in airfoil_database_path.glob("*.dat")
+    # ]
+    #
+    # for af in UIUC_airfoils:
+    #     print(af.name)
+    #     datas = Data.from_xfoil(
+    #         airfoil=af,
+    #         alphas=[2, 60],
+    #         Re=1e6,
+    #         mach=0,
+    #         n_crit=9,
+    #         xtr_upper=0.99,
+    #         xtr_lower=0.99,
+    #     )
+    #
+    #     d = datas[0]
+    #     # print(d.to_vector())
+    #     # print(Data.from_vector(d.to_vector()))
+    #     print(d == Data.from_vector(d.to_vector()))
diff --git a/neuralfoil/gen2_5_architecture/main.py b/neuralfoil/gen2_5_architecture/main.py
new file mode 100644
index 0000000..9d18ae3
--- /dev/null
+++ b/neuralfoil/gen2_5_architecture/main.py
@@ -0,0 +1,358 @@
+import aerosandbox as asb
+import aerosandbox.numpy as np
+from typing import Union, Dict, Set, List
+from pathlib import Path
+from neuralfoil.gen2_5_architecture._basic_data_type import Data
+
+npz_file_directory = Path(__file__).parent / "nn_weights_and_biases"
+
+bl_x_points = Data.bl_x_points
+
+
+def _sigmoid(x):
+    return 1 / (1 + np.exp(-x))
+
+
+def _inverse_sigmoid(x):
+    return -np.log(1 / x - 1)
+
+
+_scaled_input_distribution = dict(np.load(npz_file_directory / "scaled_input_distribution.npz"))
+_scaled_input_distribution["N_inputs"] = len(_scaled_input_distribution["mean_inputs_scaled"])
+
+
+def _squared_mahalanobis_distance(x):
+    # Row-wise (x - mu)^T @ Sigma^-1 @ (x - mu), where mu and Sigma^-1 are the
+    # mean and inverse covariance of the scaled training inputs.
+    d = _scaled_input_distribution
+    return np.sum(
+        (x - d["mean_inputs_scaled"]) @ d["inv_cov_inputs_scaled"] * (x - d["mean_inputs_scaled"]),
+        axis=1
+    )
+
+
+def get_aero_from_kulfan_parameters(
+        kulfan_parameters: Dict[str, Union[float, np.ndarray]],
+        alpha: Union[float, np.ndarray],
+        Re: Union[float, np.ndarray],
+        n_crit: Union[float, np.ndarray] = 9.0,
+        xtr_upper: Union[float, np.ndarray] = 1.0,
+        xtr_lower: Union[float, np.ndarray] = 1.0,
+        model_size="large"
+) -> Dict[str, Union[float, np.ndarray]]:
+    ### Load the neural network parameters
+    filename = npz_file_directory / f"nn-{model_size}.npz"
+    if not filename.exists():
+        raise FileNotFoundError(
+            f"Could not find the neural network file {filename}, which contains the weights and biases.")
+
+    data: Dict[str, np.ndarray] = np.load(filename)
+
+    ### Prepare the inputs for the neural network
+    input_rows: List[Union[float, np.ndarray]] = [
+        *[kulfan_parameters["upper_weights"][i] for i in range(8)],
+        *[kulfan_parameters["lower_weights"][i] for i in range(8)],
+        kulfan_parameters["leading_edge_weight"],
+        kulfan_parameters["TE_thickness"] * 50,
+        np.sind(2 * alpha),
+        np.cosd(alpha),
+        1 - np.cosd(alpha) ** 2,
+        (np.log(Re) - 12.5) / 3.5,
+        # No mach
+        (n_crit - 9) / 4.5,
+        xtr_upper,
+        xtr_lower,
+    ]
+
+    ### Handle vectorization: figure out how many cases the user wants to run
+    N_cases = 1  # TODO rework this with np.atleast_1d
+    for row in input_rows:
+        if np.length(row) > 1:
+            if N_cases == 1:
+                N_cases = np.length(row)
+            elif np.length(row) != N_cases:
+                raise ValueError(
+                    f"The inputs to the neural network must all have the same length. "
+                    f"(Conflicting lengths: {N_cases} and {np.length(row)})"
+                )
+
+    for i, row in enumerate(input_rows):
+        input_rows[i] = np.ones(N_cases) * row
+
+    x = np.stack(input_rows, axis=1)  # N_cases x N_inputs
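+
+    # Scaled-input column map, inferred from `input_rows` above (the flipping
+    # logic below relies on these indices): columns 0-7 upper Kulfan weights,
+    # 8-15 lower Kulfan weights, 16 LE weight, 17 scaled TE thickness,
+    # 18 sin(2*alpha), 19 cos(alpha), 20 1 - cos^2(alpha), 21 scaled ln(Re),
+    # 22 scaled n_crit, 23 xtr_upper, 24 xtr_lower; 25 columns in total.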
+
+    ##### Evaluate the neural network
+
+    ### First, determine the structure of the neural network (i.e., how many layers it has) from the keys.
+    # These keys come from the dictionary of saved weights/biases for the specified neural network.
+    try:
+        layer_indices: Set[int] = set([
+            int(key.split(".")[1])
+            for key in data.keys()
+        ])
+    except TypeError:
+        raise ValueError(
+            f"Got an unexpected neural network file format.\n"
+            f"Dictionary keys should be strings of the form 'net.0.weight', 'net.0.bias', 'net.2.weight', etc.\n"
+            f"Instead, got keys of the form {data.keys()}.\n"
+        )
+    layer_indices: List[int] = sorted(list(layer_indices))
+
+    ### Now, set up evaluation of the basic neural network.
+    def net(x: np.ndarray):
+        """
+        Evaluates the raw network (taking in scaled inputs and returning scaled outputs).
+        Input `x` dims: N_cases x N_inputs
+        Output `y` dims: N_cases x N_outputs
+        """
+        x = np.transpose(x)
+        layer_indices_to_iterate = layer_indices.copy()
+
+        while len(layer_indices_to_iterate) != 0:
+            i = layer_indices_to_iterate.pop(0)
+            w = data[f"net.{i}.weight"]
+            b = data[f"net.{i}.bias"]
+            x = w @ x + np.reshape(b, (-1, 1))
+
+            if len(layer_indices_to_iterate) != 0:  # Don't apply the activation function on the last layer
+                x = np.softplus(x)
+        x = np.transpose(x)
+        return x
+
+    y = net(x)  # N_cases x N_outputs
+    y[:, 0] -= _squared_mahalanobis_distance(x) / (2 * _scaled_input_distribution["N_inputs"])
+    # This penalty was baked into training in order to ensure the network asymptotes to zero analysis confidence far away from the training data.
+
+    ### Then, flip the inputs and evaluate the network again.
+    # The goal here is to embed the invariant of "symmetry across alpha" into the network evaluation.
+    # (This was also performed during training, so the network is "intended" to be evaluated this way.)
+    x_flipped = x + 0.  # An array-API-agnostic way to force a memory copy of the array to be made.
+    x_flipped[:, :8] = x[:, 8:16] * -1  # switch kulfan_lower with a flipped kulfan_upper
+    x_flipped[:, 8:16] = x[:, :8] * -1  # switch kulfan_upper with a flipped kulfan_lower
+    x_flipped[:, 16] *= -1  # flip kulfan_LE_weight
+    x_flipped[:, 18] *= -1  # flip sin(2 * alpha)
+    x_flipped[:, 23] = x[:, 24]  # switch xtr_upper with xtr_lower
+    x_flipped[:, 24] = x[:, 23]  # switch xtr_lower with xtr_upper
+
+    y_flipped = net(x_flipped)
+    y_flipped[:, 0] -= _squared_mahalanobis_distance(x_flipped) / (2 * _scaled_input_distribution["N_inputs"])
+    # Same out-of-distribution confidence penalty as above.
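+
+    # Scaled-output column map, inferred from the unpacking code below:
+    # column 0 is analysis confidence (pre-sigmoid), 1 is 2*CL,
+    # 2 is ln(CD)/2 + 2, 3 is 20*CM, 4 is Top_Xtr, 5 is Bot_Xtr; then six
+    # blocks of Data.N = 32 columns each: upper log10(Re_theta + 0.1),
+    # upper ln(H/2.6), upper ue/vinf, followed by the same three quantities
+    # for the lower surface.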
+
+    ### The resulting outputs will also be flipped, so we need to flip them back to their normal orientation
+    y_unflipped = y_flipped + 0.  # An array-API-agnostic way to force a memory copy of the array to be made.
+    y_unflipped[:, 1] *= -1  # CL
+    y_unflipped[:, 3] *= -1  # CM
+    y_unflipped[:, 4] = y_flipped[:, 5]  # switch Top_Xtr with Bot_Xtr
+    y_unflipped[:, 5] = y_flipped[:, 4]  # switch Bot_Xtr with Top_Xtr
+
+    # switch the upper and lower (Re_theta, H) blocks
+    y_unflipped[:, 6: 6 + Data.N * 2] = y_flipped[:, 6 + Data.N * 3: 6 + Data.N * 5]
+    y_unflipped[:, 6 + Data.N * 3: 6 + Data.N * 5] = y_flipped[:, 6: 6 + Data.N * 2]
+
+    # switch upper_bl_ue/vinf with lower_bl_ue/vinf (with a sign flip)
+    y_unflipped[:, 6 + Data.N * 2: 6 + Data.N * 3] = -1 * y_flipped[:, 6 + Data.N * 5: 6 + Data.N * 6]
+    y_unflipped[:, 6 + Data.N * 5: 6 + Data.N * 6] = -1 * y_flipped[:, 6 + Data.N * 2: 6 + Data.N * 3]
+
+    ### Then, average the two outputs to get the "symmetric" result
+    y_fused = (y + y_unflipped) / 2
+    y_fused[:, 0] = _sigmoid(y_fused[:, 0])  # Analysis confidence (trained as a binary label; the sigmoid maps it into (0, 1))
+
+    ### Unpack the neural network outputs
+    # sqrt_Rex_approx = [((Data.bl_x_points[i] + 1e-2) / Re) ** 0.5 for i in range(Data.N)]
+    # TODO
+
+    analysis_confidence = y_fused[:, 0]
+    CL = y_fused[:, 1] / 2
+    CD = np.exp((y_fused[:, 2] - 2) * 2)
+    CM = y_fused[:, 3] / 20
+    Top_Xtr = y_fused[:, 4]
+    Bot_Xtr = y_fused[:, 5]
+
+    upper_bl_ue_over_vinf = y_fused[:, 6 + Data.N * 2: 6 + Data.N * 3]
+    lower_bl_ue_over_vinf = y_fused[:, 6 + Data.N * 5: 6 + Data.N * 6]
+
+    # The theta blocks are stored as log10(Re_theta + 0.1); recover theta via
+    # theta = Re_theta / (|ue/vinf| * Re).
+    upper_theta = (
+        (10 ** y_fused[:, 6: 6 + Data.N]) - 0.1
+    ) / (np.abs(upper_bl_ue_over_vinf) * np.reshape(Re, (-1, 1)))
+    upper_H = 2.6 * np.exp(y_fused[:, 6 + Data.N: 6 + Data.N * 2])
+
+    lower_theta = (
+        (10 ** y_fused[:, 6 + Data.N * 3: 6 + Data.N * 4]) - 0.1
+    ) / (np.abs(lower_bl_ue_over_vinf) * np.reshape(Re, (-1, 1)))
+    lower_H = 2.6 * np.exp(y_fused[:, 6 + Data.N * 4: 6 + Data.N * 5])
+
+    results = {
+        "analysis_confidence": analysis_confidence,
+        "CL"                 : CL,
+        "CD"                 : CD,
+        "CM"                 : CM,
+        "Top_Xtr"            : Top_Xtr,
+        "Bot_Xtr"            : Bot_Xtr,
+        **{
+            f"upper_bl_theta_{i}": upper_theta[:, i]
+            for i in range(Data.N)
+        },
+        **{
+            f"upper_bl_H_{i}": upper_H[:, i]
+            for i in range(Data.N)
+        },
+        **{
+            f"upper_bl_ue/vinf_{i}": upper_bl_ue_over_vinf[:, i]
+            for i in range(Data.N)
+        },
+        **{
+            f"lower_bl_theta_{i}": lower_theta[:, i]
+            for i in range(Data.N)
+        },
+        **{
+            f"lower_bl_H_{i}": lower_H[:, i]
+            for i in range(Data.N)
+        },
+        **{
+            f"lower_bl_ue/vinf_{i}": lower_bl_ue_over_vinf[:, i]
+            for i in range(Data.N)
+        },
+    }
+    return {key: np.reshape(value, -1) for key, value in results.items()}
+
+
+def get_aero_from_airfoil(
+        airfoil: asb.Airfoil,
+        alpha: Union[float, np.ndarray],
+        Re: Union[float, np.ndarray],
+        n_crit: Union[float, np.ndarray] = 9.0,
+        xtr_upper: Union[float, np.ndarray] = 1.0,
+        xtr_lower: Union[float, np.ndarray] = 1.0,
+        model_size="large",
+) -> Dict[str, Union[float, np.ndarray]]:
+    airfoil_normalization = airfoil.normalize(return_dict=True)
+
+    from aerosandbox.geometry.airfoil.airfoil_families import get_kulfan_parameters
+
+    kulfan_parameters = get_kulfan_parameters(
+        airfoil_normalization["airfoil"].coordinates,
+        n_weights_per_side=8
+    )
+
+    # Analyze the normalized airfoil, correcting alpha and Re for the
+    # derotation and rescaling applied by normalize().
+    return get_aero_from_kulfan_parameters(
+        kulfan_parameters=kulfan_parameters,
+        alpha=alpha + airfoil_normalization["rotation_angle"],
+        Re=Re / airfoil_normalization["scale_factor"],
+        n_crit=n_crit,
+        xtr_upper=xtr_upper,
+        xtr_lower=xtr_lower,
+        model_size=model_size
+    )
+
+
+def get_aero_from_coordinates(
+        coordinates: np.ndarray,
+        alpha: Union[float, np.ndarray],
+        Re: Union[float, np.ndarray],
+        n_crit: Union[float, np.ndarray] = 9.0,
+        xtr_upper: Union[float, np.ndarray] = 1.0,
+        xtr_lower: Union[float, np.ndarray] = 1.0,
+        model_size="large",
+):
+    return get_aero_from_airfoil(
+
airfoil=asb.Airfoil( + coordinates=coordinates + ), + alpha=alpha, + Re=Re, + n_crit=n_crit, + xtr_upper=xtr_upper, + xtr_lower=xtr_lower, + model_size=model_size + ) + + +def get_aero_from_dat_file( + filename, + alpha: Union[float, np.ndarray], + Re: Union[float, np.ndarray], + model_size="large", +): + with open(filename, "r") as f: + raw_text = f.readlines() + + from aerosandbox.geometry.airfoil.airfoil_families import get_coordinates_from_raw_dat + return get_aero_from_coordinates( + coordinates=get_coordinates_from_raw_dat(raw_text=raw_text), + alpha=alpha, + Re=Re, + model_size=model_size + ) + + +if __name__ == '__main__': + + airfoil = asb.Airfoil("dae11").repanel().normalize() + # airfoil = asb.Airfoil("naca0050") + # airfoil = asb.Airfoil("naca0012").add_control_surface(10, hinge_point_x=0.5) + + alpha = np.linspace(-5, 15, 50) + Re = 1e6 + + aero = get_aero_from_airfoil( + airfoil, 3, Re, + model_size="xxxlarge" + ) + + aeros = {} + + model_sizes = ["xxxlarge"] + + for model_size in model_sizes: + aeros[f"NF-{model_size}"] = get_aero_from_airfoil( + airfoil=airfoil, + alpha=alpha, + Re=Re, + model_size=model_size + ) + + if True: + + aeros["XFoil"] = asb.XFoil( + airfoil=airfoil, + Re=Re, + max_iter=20, + xfoil_repanel=True + ).alpha(alpha) + + import matplotlib.pyplot as plt + import aerosandbox.tools.pretty_plots as p + + fig, ax = plt.subplots(2, 2, figsize=(10, 8)) + for label, aero in aeros.items(): + if "xfoil" in label.lower(): + a = aero["alpha"] + kwargs = dict( + color="k" + ) + else: + a = alpha + kwargs = dict( + alpha=0.6, + ) + + ax[0, 0].plot(a, aero["CL"], **kwargs) + ax[0, 1].plot(a, aero["CD"], label=label, **kwargs) + ax[1, 0].plot(a, aero["CM"], **kwargs) + ax[1, 1].plot(aero["CD"], aero["CL"], **kwargs) + plt.sca(ax[0, 1]) + plt.legend() + ax[0, 0].set_xlabel("$\\alpha$") + ax[0, 0].set_ylabel("$C_L$") + + ax[0, 1].set_xlabel("$\\alpha$") + ax[0, 1].set_ylabel("$C_D$") + ax[0, 1].set_ylim(bottom=0) + + ax[1, 0].set_xlabel("$\\alpha$") + ax[1, 0].set_ylabel("$C_M$") + + ax[1, 1].set_xlabel("$C_D$") + ax[1, 1].set_ylabel("$C_L$") + ax[1, 1].set_xlim(left=0) + + from aerosandbox.tools.string_formatting import eng_string + + plt.suptitle(f"\"{airfoil.name}\" Airfoil at $Re_c = \\mathrm{{{eng_string(Re)}}}$") + + p.show_plot() diff --git a/neuralfoil/gen2_5_architecture/nn_weights_and_biases/scaled_input_distribution.npz b/neuralfoil/gen2_5_architecture/nn_weights_and_biases/scaled_input_distribution.npz new file mode 100644 index 0000000000000000000000000000000000000000..3ffc8c399889182ef3729e3d0febf966f7c31a26 GIT binary patch literal 7696 zcma)h1ymJX+qQ~;5+Yp!(jka+a|CISkP=Zkl$OqO=#=g}q;!dZ*= z`Rg{j^S*04$O#u&_;8*v%OfhVG$I%YA90c?&$ex^pexV(EEof>c#!S=+}-im zzSr-h?#j-3A!`wt)iQap^@O?wZi#VcKmTPw1}$xX?Twl(STj0Gyf`0MFGUIZOq2-v z+|v9T>BC3|xqp!+|4rK5(fyw}-y{2r>@dM!F+1&ABE^fgm#-*CX+Ulug*AawJ}L!y zN})n%Ponarkfu=P)tpMYoh7@f0eu7sQ~pN?XF-w{bvy?r4<5O4lDAK+`3T1U^7^0& z(j2oVT8%(u!Kn~0t@bM9Rdg^Oc`yWr<6Y z-rU}c`rWmu38U=7@A|fes%_b&!z$?E@_|H)0Y9yl)pglSiRt-~6M`bYLBe0HZofv^ z1jb%fr?1-e`(g13j6S3|Z(ZINhTy+x04eyMMlf^3d1jnSv!9Ue@`qjA*Hil?%YIby zjXml~W%juNDCt9*??uX~q-?S{yYsIK?@=)vNR~9%ir?D9uMOPQ_UPC9=ISqzr>?89 zm&I_&_B53XRN2y&VCN`<)&Z*ZZIUTiAF;b2Lu0Mfy%%Uxw9n|yWMH`6{46Tv;GwHA z`e6rC{i!-Wc=)kB^-0(0qDKw!4j&g?-p4}VTfa)>K(y|p)iJpZvQIoC*@j5I7MfUC z{rbVT_)n>^+MS`y6$TsMQiH=qqORY&ZO23k@rYf>)!d9Q3TxXcyz#XX->Y)rt9~Xa zO9LDkB0d@|V8R-!? 
zuhJJLZVt6N)L8mFtL+@9CZq~-iTO9F>I>P7D`V$J^+H`Zk^k`mA$8bcH%O`C{h}`M#kV1rDBsdM_vbhKJnARIbUDre z)KBgk!iS$Sc0>!qoKndyR6}=`5R779`M_=w@SoCbc8)o=l8#tHZ5yJA`42bKo5!-> zMVVN-trn%8z1SgH(A}&R%&u&ZqCkrC2GHv*X)g&$#P3W?ek(gj?hqHMa=bLoQF>sA zJF%+UBHAD?A++9$Jz|}u7Xa(hHTryMm6H_5!B!3{^G+rXu^G-rF+GpG-i*XOE7!32 z2|5cxn`1In1fqBbRr~hYk(fjt>nW1Z*knE*m>;ryQL@&x&tw z=hlCHTWqLRPhzWi=%r0AKV^tR2abPfR=sDB*9))~E{E|~2IlBoY_YQIb80q&5FHgk7C!~Nv?&I-*KHDzszy?k>%3ALwVor? zjCCcUucY5k1rpky(~ zs{R2a0v}eW&M$fsZ`=1|NM|#9TzZ;cuYzZoFMhq1T!=;;Y9)_IRc&4pRh@Tdqb*o} zWzc^dZ4**SO=h+*Nmf9me)G;lD%8EVu0!>6k;H>@7h9iE`~9n>bZy_yJ|Le?Y4Cg` zSG^9B>ky&oI56TMH88A_G}YkK(ZhYLW&=Ns34?S(oM8W5_VwV|FEcpuWGy`2InLe> zU9<#^jy-K2wj`&fNzN>V+^rSD*}7&C9&IkK))R|nS!XYjo3<1<$>ZN#xl#W&(y$(>I-k22LbDbrrBk1+UJHMyfG)f2hVxm zZbj1bA&T92xXP^=4AgCl-6NAjvE#n=bs&MeBPQZO%KNK*(iy*m>KI|mL>IPs--<%K zZ`nq8PFhVM6O2WQg_Xo4V?dH_25XU6vQ_+7H_;Eolv5~kZ89`Pqmwzf{Z6=6X^Yuw z&8%&^a35S_|2-|ovAmyf!B^W8rM0}ph4OXHBHlmHOTlD^hn5lpyJSAbE*-nY!Gf=x z{aC1$DEe@2{Bqc)pt+IyrHxQjcE_~RUE3ePqrNlY2&37Z%Q)UaLV-AAue9=)HlyQ& zk?UPx66){;P7?;(z#bnn%JGQtW8w)3sMne;X<}-+)JhzzkM|0MBEBs8?QWSEZrMj*i?PAUW?}HKc@qKeA}qTKS&&p+P&mT&N}mbJM_lSU}%$vS(*QO zo$v6yBdRdj^~+C9M|2-4X7M9(!+J2tYeaak|1o#WMD+XQZ2PL`UfQ88QPLUDMAEHm zN%i`*yo$betzY)v1wY3y!IptzjVq!<%9}874P>Y~jU00G6HVf6byjm8D=)7WXO_|q zyyiSl4?lQQ!;$#GA+iJ?e>CrtgrU?hQoz6mJoZKQge8Te0<%i3_WH1< zLIDxnl#bdf;Y}3>*@<&*p+1m`q#kzk~N7z@1 zgF}5d$5V|TyTxm?yLjDq6p50pyGiaosxlLBi2;jCsvJr<9?&ByRWrUjE;Afson9(} z(R+V;2)yoPOqRpe3c^7u(F=4>Yp$(H9pos_Z^l<4S%5L_J5tZqyV#i z%Sl8_DHQ^U4J+Rz2*I(t*)P-Jp^l_Kct>SZ<~x3hWkhd`8v85ilkm-;6ei7 z-$Bc`>%6D<7$P9}|Mr^Eu51P!yKhP|l?N!cAA%|Q?>$t@g;O?@?vj(=8;MJANZ;a1 zrI&6Qwnil7A{N)OR-6h#Hhj;IBwia83%g0t%&`$S1TP^AcC@xmrP~)3_y>9$BXL1N zppL(|QvTbEfbn_p(NM9RGO<<$#y`tTqi3{S$Ufsfav#9{X@{NkF zH7j5IcsR>ZP_0KY&+Qht>#-_S7%^WiE=UVKo!WCWc)Pv3+yN|GAG2Q({$p7k(+8O- zTXf>lsA|UYt=HO#XIBD#y@Xi<=Qdjo#Zrq*5vco^M3g6b_$hBycD&`$OPVS_az1NL zV=#Jr`80%-OI3l;U6u3(|B3-cfVm3`fP5$FGhU}eF<|Zk9sl~SRLe!%p3_w2ekjMi z92nhQb~BW__?7dhOF5frcDg*dqmd0c*bq!i3g*e6S(C`@SLNY|i6pC8_3q$3$CanP zq5Kc{_C4f>(sY9yaz~hfeg76;H$cN?qw?Ub@?MK|GHYy89Fe)+BKNqZxd@hU(OA_vGqZUaHk4(PJTG11lS>nKzp&;?oneKX!>xaq*~!TR3Ij2W=M^S-D@ z?Btl5?ZWO(+j!|zn7*KqZwr`b*vIL%hqkBI<|S0jf@b7t+bMsxXX#p{X?ML_;Prw9Zsjr+~5 z{TeTAzq_~FxHmg>x2Hy8r>wAyr1Epe9$zac7ghk|gYqGHFn<7YjDVr;gRuaAhX2v> z(0Ksl{Lk--Hsg)D!!(wlma^|?QfyS(ES&8r-c}*?FO6f2KPCnfg$c)mVD4c4UmF-# zn=GaM#wnm3{o&^--P_?6H0}|*C*=A6C!O=Qk#yS6>tm4Hf7e9B;uH=4(nQq1HPHtL zcjN!4i)J7EOBd}(q!`Zj7KQU$D-B3mW?HIMS`Nj0O7B;T=zGK(p&6gl_kuBbJ>nkc zViZA<3=?4kCyOkNU2rS1ei}*CI@-8}aiyF2XtZ@LF~jPz;Fl6(OT|QGX3^>=+a_z_ zDrf2HS8uT{Z|NH(+bJs<^9?)16Nx(VFpu@ugaYkh^k?@ju8&|9WzDT6WjF3#W2E`O zC_FUL`J`AXXr$9igZx_YKI67SZ(814Wh(xb`FRt-l1P&Ji$LUFBI(z??ehDG$%3izZ05;+ zAZ8}%YCaKr*>=Wy?DB^PbAwRWhd`~xn*A90$fR%Vg5jWeTiyOx9r~1VTKsiwINn`W ziNu%7euw-Nj_USl@`?Aaz~6vxXx`jNvU0@Wq+U~oq7Irdu*Im&w5P{){zJQ>|oj>{S24oi;g%|gd} z@h|78*b|>t*jy}rUTS8@IGb{zm*~Nx-2_pLSs-La$88zo?MPjHk4=vai0zXq?MPL& z&hv$@J!lAY*MqQLP+DQnI%*>j=Vg23(7)LjsxCFm<5eUno~w2^sqA}rlQSycw~REX#&i0*{2w2Fa-3q@&cyDL%AsOb z@(3&4>vqjO+8P@^xkD4)!Ed!_Lc-MHGvxCGuJn9NVJU$0i|@~!FWt~JA4$z|3-r1K z?o*a^ntIdu3l3fGgSE;nn`o4eXaz3KdA$7T^5q_B`eP-?>2efNx=wqN?CY0GjSOnqasWB>}m%5-a~c zdRRe0v;T4OM_pb2&>2CQPvFj}y%)_r-35Ie-Rb9i577d|=(Bh<11lm{ucUn@{(dMi zW5_(4jn6WS8`f;Mx1?EVIuu=wCBE%a)qmw&C(Un@VoRX&N^N6!0*DQIv9qaOSoTy@_yO9WXclkp^DJBNbj%J{#&$j;-9YqXo znu3BqdS+v=vW4gIAls7S*4g8Af+Fc%R&q^8Qg1bl5~I62Q}`WCnRiMLhH1WLRQV3% zEFRq*gYk^TO3%S=r$e4O&E*;nP*zce6LMDx`S)tTthTM+FtbO%lOwx)YkY63G$1)U zLVAReuH^nP1?k7v^HL=Tx}CnF1uSZ$?H^}i21(q99n(BZpAzB2YAJ2fjud*8mmAx9 
z)%zOj+uYiIO6DRoK-NkcSS!?r>9V!5CCx8S%?LrX2{fwoQ??of6q5DT7OK4g%fTz? zP8H(GDUf0$zDKdY?<&Z29ehcGK4QCBG3`MDuxoOUZ#ncDmw__KtSC6JtUwpW3)JQ3 z5f$Yy-0rg6)eMxdI=p@9-fe7h+_CYMtr?;G?2r*gwi~ry?Y(fB*-ZDEwA)u1ubr}w zxGGUkeyiDda@{TqYh~?ZFGvs)khK6sHkO9IGHs;|tRmcHysv(+_PbD8t$LqV)8i2S z^+Lz*j-psp_R&1(v{{6IHDpI`$exCS8N(?Wg)Vl+gCD)j2NdN%BNjlB1*U~J45Nd; zAgh4CIZfmU(>6>7(tcHwhqa}$+x%8yL5h12m_t@VV4|49r5b)uo=1ASZA*gim#jt~ zuGJ6WV8R>=9e(Wq3Y(-l@tH?OXO07g&2t(9-EZIKUVLQI-C5a4oMgnM#I%h7vxfIl zc1Nahj6?|?JQg=heo?5PAAa-3%xC&W+Uv-_R}#T0BnCt*x-sc1p=kD)#UrD9fHvVZ zh}Z%k(DTH29Ik=Cb}1iM>a0`lXjGMOd z?p1gj3vtMyWITF%j5Y&lc&4FsKq;oX7*byH2_|R+t>n6G87c~kgqi}l-Jr_T8lWN$ zDFq8Z*I>Ts*X~4WFe8d5ea0oTVmlV|bEzpDc>0W_p+8ZqRJxyvswJmgWn43^W!`C- zZ%+@&Sk`PmHz?>+tSa)=tmAq9_;do?htN^c;jN7o|H}H(cA{7N@v4%0gL(jsMO947 zYqP{)J|ohm`_}4Oo%{}Bn-0MBVZ`296#?GeSa5oja~l?pGG;mv#oySCag;+%@Dd|q z;Uw*F?xz)yIkgG~@j1vMk0i{%BY(C~fkkdN-UdN=+2t0)BxO?xF8Q%`%TcxXNtqUq zwCbf#>FhHy_n>Vm@N1dbbSaZQ3;C0N_af)GggGEDdhmwHjosws`ih?qBd&Odd!+_0 zSmTKinaa@T_JOhtjh_kMBS~kD4a$<7BYZ4%*cN3^NWr2Op$Pp6tSx<^6M%RX${jrm z@$E;eh?t-kM9t9^j7X80?+ASlYo06&8Q$oKefyb6i%@Ihn&s!3a7vOs$rZ$#A0*WM zRIL!Qdi)f&1~vJkdKp=RcxpZ!I{5s~Fz%=|4GvY*6h(L%Jae{b+i&8~xMn zn?PlT-$S|c{}%xOV^0>K@iW_aIGQ@t)yG-uwcb1(Ic$uK%$VX zyRD_<2So`#4nyC!g__nL?9nM9Dx4?AtWPGpltS^#+-i07mokHCH@$KXP^$2p7Tw&` zV{6it#^YB*Ae2fP1I6JA>fPJ#*Y%J5(QxP$`;!v}Y-bq6-2{zKjCM^#&%|H92y+&H z+SlB5U)tOat;s1U9tZh~Go;zYDqA!c?Hj&hpv-#yi{W*Xl!Gh0JPLeWg7k^)E7e&^4eYm+yT@DEN1aLP_j%`L814pQSQ&0u;f}O3!DIP!g~?8_Ff~IEcUBm_5O` z=U^5P3y07^yD(j^Badb*@b(}X+w7V9F#Ct2yhxXg%`Q3WZ;J1pnQC>>C3&{fM(xbs zy+i=#g>)4`gE4RE*Z6C5)z8Y8D6lk+*M7LBeu;awtagy$!?=t^rUaBRqBj!oHvyy0 z(RYY6kib2dB`GGJ5%pN+G&Oad68{8PezXszeQFA%&jlWNOlVg?E^uFib}fLGJ`}2j zYKkbEEF?lci=H;NudB#y1Q!|B&?z*5)!1t*S`rY0+~6*V`7S)F_~fx6%~ye9$biQw z)+fP5hU{!{eI#YQiybKFGAk1-piKq0EJ&t~=xpcWp4^?yD%g zsHAu2fJ;j)!A&Ip|8d2RF~R)&)KAIl=H+tz1Xd;Sntavv0Gav&F7vWYd0W zA^m4@66x`J>S`MGD>a+i?92lOoWr$Fyg$gNJfbn{K{palR$cti=luwLq3J^Rn3`^J z0mk=}-fUP$8C2?G_5}xwHyS@Pr$KTQdn!)5)VM~B^Ju5bug}r^7Z3`@eQ2k34d7|J zibPX{ogdY6H*3`jRcleOU2NQ++|?@Fg6IPjMRK! z_h!29$sM)0OWY6RaA2$p?GbWoj)o0#{{$WrYJz7Os4{sXAJVu0@y$>B?fM|Fy?CFwUfizWb-{HCS zcenJ9|M|xYrT9zw{-3M=?LXn4z0-fk_+vi)@e}`ei{yX9IR8ESN6G!~5P!7#AL;!E gg8FX|_kZ90UzNM2DmKoa)mXoek>8P