# pso_optimizer.py
"""
Particle Swarm Optimization for MHD Hybrid Nanofluid EV Battery Cooling.

Multi-objective optimization to find optimal:
- Hartmann number (Ha): 0-60
- Nanoparticle volume fraction (phi): 0.01-0.05
- Inlet flow velocity (u_in): 0.05-0.30 m/s

Objectives:
- Minimize maximum battery surface temperature (T_max)
- Minimize total entropy generation (S_gen)

Based on the paper's PSO implementation with adaptive inertia weight.
"""
| 15 | |
import json
import os
from pathlib import Path

import numpy as np
import torch

from model import ThermalSurrogateModel, DataNormalizer, get_model_config
| 22 | |
| 23 | |
class PSOOptimizer:
    """
    Particle Swarm Optimization for thermal management optimization.

    Implements the PSO algorithm from the paper with:
    - Adaptive inertia weight decay (0.9 -> 0.4)
    - Cognitive and social coefficients (c1=c2=2.0)
    - Multi-objective weighted-sum scalarization
    - Pareto front generation

    Scalarization note: the two objectives are min-max normalized using
    *running* bounds accumulated over every evaluate() call of a run, not
    per-batch bounds. Per-batch normalization would put fitness values from
    different iterations on different scales, making the personal/global
    best comparisons meaningless. Stored bests are re-scored under the
    current bounds before each comparison (see optimize()).
    """

    def __init__(self, model, normalizer,
                 n_particles=50, max_iter=200,
                 w_start=0.9, w_end=0.4,
                 c1=2.0, c2=2.0,
                 alpha=0.55):  # Paper: w1=0.55 for temperature, w2=0.45 for entropy
        """
        Args:
            model: Trained ThermalSurrogateModel (callable on a float32 tensor).
            normalizer: DataNormalizer for input/output scaling.
            n_particles: Swarm size (paper: 30-60).
            max_iter: Maximum iterations (paper: 100-200).
            w_start: Initial inertia weight.
            w_end: Final inertia weight.
            c1: Cognitive coefficient.
            c2: Social coefficient.
            alpha: Weight for T_max in the objective (1-alpha for S_gen).
        """
        self.model = model
        self.normalizer = normalizer
        self.n_particles = n_particles
        self.max_iter = max_iter
        self.w_start = w_start
        self.w_end = w_end
        self.c1 = c1
        self.c2 = c2
        self.alpha = alpha

        # Parameter bounds: [Ha, phi, u_in]
        self.lb = np.array([0.0, 0.01, 0.05])
        self.ub = np.array([60.0, 0.05, 0.30])
        self.dim = 3

        # Running min/max of the raw objectives; shared scale for all
        # fitness values within a run (see class docstring).
        self._reset_objective_bounds()

    def _reset_objective_bounds(self):
        """Forget the running objective bounds (called at the start of a run)."""
        self._t_lo, self._t_hi = np.inf, -np.inf
        self._s_lo, self._s_hi = np.inf, -np.inf

    def _scalarize(self, T_max, S_gen):
        """
        Weighted-sum fitness (Eq. 5 from paper) under the current running
        objective bounds. Works elementwise on arrays or scalars.
        """
        t_span = max(self._t_hi - self._t_lo, 1e-10)  # guard degenerate span
        s_span = max(self._s_hi - self._s_lo, 1e-10)
        T_norm = (T_max - self._t_lo) / t_span
        S_norm = (S_gen - self._s_lo) / s_span
        return self.alpha * T_norm + (1 - self.alpha) * S_norm

    def evaluate(self, positions):
        """
        Evaluate the objective function using the neural-network surrogate.

        Side effect: widens the running objective bounds so the current batch
        always lies inside them.

        Args:
            positions: [n_particles, 3] - parameter values.

        Returns:
            fitness: [n_particles] - scalar objective values (lower is better).
            predictions: [n_particles, 6] - full model predictions.
        """
        # Normalize inputs
        X_norm = self.normalizer.transform_input(positions.astype(np.float32))

        # Model prediction (inference only)
        with torch.no_grad():
            pred_norm = self.model(torch.tensor(X_norm, dtype=torch.float32)).cpu().numpy()

        # Inverse transform to physical values
        pred = self.normalizer.inverse_transform_output(pred_norm)

        T_max = pred[:, 0]  # Temperature
        S_gen = pred[:, 2]  # Entropy generation

        # Expand running bounds to cover this batch, then scalarize on the
        # shared scale so results are comparable across iterations.
        self._t_lo = min(self._t_lo, float(T_max.min()))
        self._t_hi = max(self._t_hi, float(T_max.max()))
        self._s_lo = min(self._s_lo, float(S_gen.min()))
        self._s_hi = max(self._s_hi, float(S_gen.max()))

        return self._scalarize(T_max, S_gen), pred

    def optimize(self, verbose=True):
        """
        Run PSO optimization.

        Returns:
            best_params: [3] - optimal [Ha, phi, u_in].
            best_predictions: [6] - predicted outputs at the optimum.
            convergence_history: list of dicts with iteration-wise data.
        """
        if verbose:
            print("=" * 60)
            print("PARTICLE SWARM OPTIMIZATION")
            print(f"  Particles: {self.n_particles}")
            print(f"  Max iterations: {self.max_iter}")
            print(f"  Weights: T_max={self.alpha:.2f}, S_gen={1-self.alpha:.2f}")
            print("=" * 60)

        # Each run gets its own objective scale.
        self._reset_objective_bounds()

        # Initialize swarm uniformly inside the box [lb, ub]
        positions = self.lb + np.random.rand(self.n_particles, self.dim) * (self.ub - self.lb)
        velocities = np.zeros_like(positions)

        # Velocity clamp (10% of range per dimension) - loop-invariant, hoisted
        v_max = 0.1 * (self.ub - self.lb)

        # Evaluate initial positions
        fitness, predictions = self.evaluate(positions)

        # Personal bests
        pbest_pos = positions.copy()
        pbest_fit = fitness.copy()
        pbest_pred = predictions.copy()

        # Global best
        gbest_idx = np.argmin(pbest_fit)
        gbest_pos = pbest_pos[gbest_idx].copy()
        gbest_fit = pbest_fit[gbest_idx]
        gbest_pred = pbest_pred[gbest_idx].copy()

        convergence = []

        for iteration in range(self.max_iter):
            # Adaptive inertia weight (linear decay)
            w = self.w_start - (self.w_start - self.w_end) * iteration / self.max_iter

            # Random coefficients
            r1 = np.random.rand(self.n_particles, self.dim)
            r2 = np.random.rand(self.n_particles, self.dim)

            # Update velocities (with clamping)
            cognitive = self.c1 * r1 * (pbest_pos - positions)
            social = self.c2 * r2 * (gbest_pos - positions)
            velocities = np.clip(w * velocities + cognitive + social, -v_max, v_max)

            # Update positions, reflected back into the feasible box by clipping
            positions = np.clip(positions + velocities, self.lb, self.ub)

            # Evaluate new positions (may widen the objective bounds)
            fitness, predictions = self.evaluate(positions)

            # Re-score stored bests under the (possibly widened) bounds so
            # the comparisons below use a single consistent scale.
            pbest_fit = self._scalarize(pbest_pred[:, 0], pbest_pred[:, 2])
            gbest_fit = self._scalarize(gbest_pred[0], gbest_pred[2])

            # Update personal bests
            improved = fitness < pbest_fit
            pbest_pos[improved] = positions[improved]
            pbest_fit[improved] = fitness[improved]
            pbest_pred[improved] = predictions[improved]

            # Update global best
            if pbest_fit.min() < gbest_fit:
                gbest_idx = np.argmin(pbest_fit)
                gbest_pos = pbest_pos[gbest_idx].copy()
                gbest_fit = pbest_fit[gbest_idx]
                gbest_pred = pbest_pred[gbest_idx].copy()

            # Log convergence
            conv_entry = {
                'iteration': iteration + 1,
                'gbest_fitness': float(gbest_fit),
                'gbest_T_max': float(gbest_pred[0]),
                'gbest_Nu': float(gbest_pred[1]),
                'gbest_S_gen': float(gbest_pred[2]),
                'gbest_Ha': float(gbest_pos[0]),
                'gbest_phi': float(gbest_pos[1]),
                'gbest_u_in': float(gbest_pos[2]),
                'inertia_weight': float(w),
                'swarm_diversity': float(np.std(positions, axis=0).mean()),
            }
            convergence.append(conv_entry)

            if verbose and (iteration + 1) % 20 == 0:
                print(f"  Iter {iteration+1:3d} | Fitness: {gbest_fit:.6f} | "
                      f"T_max: {gbest_pred[0]:.2f}°C | S_gen: {gbest_pred[2]:.4f} | "
                      f"Ha: {gbest_pos[0]:.1f} | φ: {gbest_pos[1]:.4f} | u: {gbest_pos[2]:.3f}")

        if verbose:
            print("\n OPTIMIZATION COMPLETE")
            print(f"  Finished after {len(convergence)} iterations")

        return gbest_pos, gbest_pred, convergence

    def generate_pareto_front(self, n_alpha=20, verbose=True):
        """
        Generate a Pareto front by sweeping the weight factor alpha over
        np.linspace(0.05, 0.95, n_alpha).

        The instance settings (alpha, n_particles, max_iter) are restored on
        exit, even if an optimize() run raises.

        Returns:
            list of Pareto-optimal solution dicts.
        """
        alphas = np.linspace(0.05, 0.95, n_alpha)

        if verbose:
            print("\n" + "=" * 60)
            print("PARETO FRONT GENERATION")
            print(f"  Alpha values: {n_alpha} (from {alphas[0]:.2f} to {alphas[-1]:.2f})")
            print("=" * 60)

        pareto_solutions = []

        # Remember settings so the sweep cannot leak state to later calls.
        orig_alpha = self.alpha
        orig_particles = self.n_particles
        orig_iter = self.max_iter
        try:
            # Smaller swarm for speed
            self.n_particles = 30
            self.max_iter = 100

            for alpha in alphas:
                self.alpha = float(alpha)
                best_pos, best_pred, _ = self.optimize(verbose=False)

                solution = {
                    'alpha': float(alpha),
                    'Ha': float(best_pos[0]),
                    'phi': float(best_pos[1]),
                    'u_in': float(best_pos[2]),
                    'T_max': float(best_pred[0]),
                    'Nu': float(best_pred[1]),
                    'S_gen': float(best_pred[2]),
                    'delta_T': float(best_pred[3]),
                    'BL_suppression': float(best_pred[4]),
                    'k_ratio': float(best_pred[5]),
                }
                pareto_solutions.append(solution)

                if verbose:
                    print(f"  α={alpha:.2f}: T_max={best_pred[0]:.1f}°C, "
                          f"S_gen={best_pred[2]:.4f}, Ha={best_pos[0]:.1f}, "
                          f"φ={best_pos[1]:.4f}")
        finally:
            self.alpha = orig_alpha
            self.n_particles = orig_particles
            self.max_iter = orig_iter

        return pareto_solutions
| 248 | |
| 249 | |
def run_optimization(model_dir='/app/outputs'):
    """
    Load the trained surrogate model and run PSO optimization.

    Args:
        model_dir: Directory containing 'model.pt' and 'normalizer.json';
            results are written back here as 'pso_results.json'.

    Returns:
        dict with optimal parameters, predicted performance, convergence
        history, Pareto front, and paper comparison values.
    """
    out_dir = Path(model_dir)

    # Load model
    config = get_model_config()
    model = ThermalSurrogateModel(
        input_dim=config['input_dim'],
        hidden_dims=config['hidden_dims'],
        output_dim=config['output_dim'],
        dropout=0.0  # No dropout during inference
    )
    # map_location='cpu' keeps loading robust when the checkpoint was saved
    # on a GPU but inference runs on a CPU-only machine.
    model.load_state_dict(torch.load(
        out_dir / 'model.pt',
        map_location='cpu',
        weights_only=True
    ))
    model.eval()

    # Load normalizer
    normalizer = DataNormalizer.load(str(out_dir / 'normalizer.json'))

    # Run PSO (paper parameters)
    pso = PSOOptimizer(
        model=model,
        normalizer=normalizer,
        n_particles=50,
        max_iter=200,
        alpha=0.55  # Paper: w1=0.55, w2=0.45
    )

    best_pos, best_pred, convergence = pso.optimize(verbose=True)

    # Print results
    print("\n" + "=" * 60)
    print("OPTIMAL PARAMETERS")
    print("=" * 60)
    print(f"  Hartmann Number (Ha): {best_pos[0]:.2f}")
    print(f"  Nanoparticle Fraction (φ): {best_pos[1]:.4f}")
    print(f"  Inlet Velocity (u₀): {best_pos[2]:.4f} m/s")
    print(f"\n  Paper PSO Optimal: Ha=32.4, φ=0.038, u₀=0.187 m/s")

    print("\n" + "=" * 60)
    print("PREDICTED PERFORMANCE")
    print("=" * 60)
    print(f"  Max Surface Temperature (T_max): {best_pred[0]:.2f}°C")
    print(f"  Nusselt Number (Nu): {best_pred[1]:.2f}")
    print(f"  Entropy Generation (S_gen): {best_pred[2]:.4f}")
    print(f"  Cell-to-Cell ΔT: {best_pred[3]:.2f}°C")
    print(f"  BL Suppression: {best_pred[4]:.2f}%")
    print(f"  k_ratio (k_hnf/k_bf): {best_pred[5]:.3f}")

    # Comparison with paper values
    print("\n" + "=" * 60)
    print("COMPARISON WITH PAPER RESULTS")
    print("=" * 60)
    paper_results = {
        'T_max': 40.8,
        'Nu': 18.7,
        'S_gen_reduction': 31.5,  # % reduction
    }
    print(f"  T_max: Paper={paper_results['T_max']}°C, Model={best_pred[0]:.1f}°C")
    print(f"  Nu: Paper={paper_results['Nu']}, Model={best_pred[1]:.1f}")

    # Generate Pareto front
    pareto = pso.generate_pareto_front(n_alpha=15, verbose=True)

    # Save results
    results = {
        'optimal_parameters': {
            'Ha': float(best_pos[0]),
            'phi': float(best_pos[1]),
            'u_in': float(best_pos[2]),
        },
        'optimal_performance': {
            'T_max': float(best_pred[0]),
            'Nu': float(best_pred[1]),
            'S_gen': float(best_pred[2]),
            'delta_T': float(best_pred[3]),
            'BL_suppression': float(best_pred[4]),
            'k_ratio': float(best_pred[5]),
        },
        'convergence': convergence,
        'pareto_front': pareto,
        'paper_comparison': paper_results,
    }

    results_path = out_dir / 'pso_results.json'
    with open(results_path, 'w') as f:
        json.dump(results, f, indent=2)
    print(f"\n Results saved: {results_path}")

    return results
| 340 | |
| 341 | |
if __name__ == "__main__":
    # Fixed seed so swarm initialization and PSO randomness are reproducible.
    np.random.seed(42)
    run_optimization()
| 345 | |