model.py
1 """
2 Neural Network Surrogate Model for MHD Hybrid Nanofluid EV Battery Thermal Management
3
4 Multi-output regression model:
5 Inputs: [Ha, phi, u_in] (3 features)
6 Outputs: [T_max, Nu, S_gen, delta_T, BL_suppression, k_ratio] (6 targets)
7
8 Architecture: Physics-aware MLP with Tanh activation
9 Based on: arxiv:1910.14194, arxiv:2302.14740 surrogate modeling approaches
10 """
11
12 import torch
13 import torch.nn as nn
14 import numpy as np
15 import json
16 import os
17
18
class ThermalSurrogateModel(nn.Module):
    """
    Multi-output MLP surrogate for the MHD hybrid nanofluid thermal system.

    Architecture: four hidden layers with Tanh activations and residual
    connections. Tanh is chosen over ReLU for physics smoothness
    (continuous derivatives).
    """
    def __init__(self, input_dim=3, hidden_dims=(64, 128, 128, 64), output_dim=6, dropout=0.1):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        # Store as a list; a tuple default avoids the mutable-default pitfall
        self.hidden_dims = list(hidden_dims)

        # Input projection
        self.input_proj = nn.Sequential(
            nn.Linear(input_dim, hidden_dims[0]),
            nn.Tanh()
        )

        # Hidden layers with residual connections
        self.hidden_layers = nn.ModuleList()
        self.residual_projs = nn.ModuleList()

        for i in range(len(hidden_dims) - 1):
            self.hidden_layers.append(nn.Sequential(
                nn.Linear(hidden_dims[i], hidden_dims[i + 1]),
                nn.Tanh(),
                nn.Dropout(dropout),
                nn.Linear(hidden_dims[i + 1], hidden_dims[i + 1]),
                nn.Tanh()
            ))
            # Residual projection when the dimensions differ
            if hidden_dims[i] != hidden_dims[i + 1]:
                self.residual_projs.append(nn.Linear(hidden_dims[i], hidden_dims[i + 1]))
            else:
                self.residual_projs.append(nn.Identity())

        # Single output head: one linear layer emitting all six quantities
        self.output_head = nn.Linear(hidden_dims[-1], output_dim)

        # Physics constraint layer: Softplus keeps outputs non-negative
        # where physically required (see predict_with_physics)
        self.softplus = nn.Softplus()

        # Initialize weights (Xavier, the standard choice for Tanh)
        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x: [batch, 3] - normalized inputs [Ha, phi, u_in]

        Returns:
            [batch, 6] - normalized outputs [T_max, Nu, S_gen, delta_T, BL_supp, k_ratio]
        """
        h = self.input_proj(x)

        for layer, res_proj in zip(self.hidden_layers, self.residual_projs):
            residual = res_proj(h)
            h = layer(h) + residual  # Residual connection

        out = self.output_head(h)

        return out

    def predict_with_physics(self, x):
        """
        Predict with non-negativity constraints enforced.

        Softplus maps the following normalized outputs to non-negative
        values; under min-max normalization, zero corresponds to the
        training minimum in physical units:
        - Nu
        - S_gen (Second Law: entropy generation >= 0)
        - delta_T
        - BL_suppression

        T_max and k_ratio are returned unconstrained.
        """
        out = self.forward(x)

        # Build the constrained tensor out-of-place: assigning into `out`
        # in place would break autograd if gradients are later required.
        constrained = out.clone()
        constrained[:, 1:5] = self.softplus(out[:, 1:5])

        return constrained

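# Illustrative usage (a minimal sketch: the batch size and random inputs are
# arbitrary placeholders, not physical operating points):
#
#     model = ThermalSurrogateModel()
#     x = torch.rand(8, 3)                      # normalized [Ha, phi, u_in]
#     y_raw = model(x)                          # [8, 6], unconstrained
#     y_phys = model.predict_with_physics(x)    # [8, 6], non-negativity enforced
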
class PhysicsLoss(nn.Module):
    """
    Physics-informed loss: an MSE data term plus soft constraint penalties.

    Penalizes, in normalized output space (where zero corresponds to the
    training minimum in physical units):
    1. Negative entropy generation (Second Law)
    2. Negative normalized k_ratio (the nanofluid never degrades conductivity)
    3. Negative boundary-layer suppression

    Input-dependent constraints (e.g. monotonicity of k_ratio in phi, or a
    Joule-heating penalty at high Ha) are not implemented here; the
    `inputs_normalized` argument gates the penalty term and leaves room
    for them.
    """

    def __init__(self, lambda_physics=0.01):
        super().__init__()
        self.lambda_physics = lambda_physics
        self.mse = nn.MSELoss()

    def forward(self, pred, target, inputs_normalized=None):
        """
        Combined data + physics loss.

        Args:
            pred: [batch, 6] predicted outputs
            target: [batch, 6] ground truth
            inputs_normalized: [batch, 3] normalized inputs; when provided,
                the soft physics penalties are added to the data loss

        Returns:
            (total_loss, data_loss, physics_loss) tuple of scalar tensors
        """
        # Data loss (MSE on all outputs)
        data_loss = self.mse(pred, target)

        physics_loss = torch.tensor(0.0, device=pred.device)

        if inputs_normalized is not None:
            # Entropy generation must be non-negative (Second Law of Thermodynamics)
            entropy_violation = torch.relu(-pred[:, 2]).mean()

            # Normalized k_ratio must be non-negative: the physical k_ratio
            # never drops below its training minimum, which is >= 1
            k_ratio_violation = torch.relu(-pred[:, 5]).mean()

            # Boundary-layer suppression must be non-negative
            bl_violation = torch.relu(-pred[:, 4]).mean()

            physics_loss = entropy_violation + k_ratio_violation + bl_violation

        total_loss = data_loss + self.lambda_physics * physics_loss

        return total_loss, data_loss, physics_loss

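# Illustrative training step (a sketch: `model`, `optimizer`, `x`, and `y_true`
# are placeholders for whatever the training script provides):
#
#     criterion = PhysicsLoss(lambda_physics=0.01)
#     total, data_l, phys_l = criterion(model(x), y_true, inputs_normalized=x)
#     optimizer.zero_grad()
#     total.backward()
#     optimizer.step()
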
class DataNormalizer:
    """Min-max normalizer for inputs and outputs."""

    def __init__(self):
        self.input_min = None
        self.input_max = None
        self.output_min = None
        self.output_max = None

    def fit(self, X, y):
        self.input_min = X.min(axis=0)
        self.input_max = X.max(axis=0)
        self.output_min = y.min(axis=0)
        self.output_max = y.max(axis=0)

        # Prevent division by zero for constant features
        input_range = self.input_max - self.input_min
        input_range[input_range == 0] = 1.0
        output_range = self.output_max - self.output_min
        output_range[output_range == 0] = 1.0

        self.input_range = input_range
        self.output_range = output_range

    def transform_input(self, X):
        return (X - self.input_min) / self.input_range

    def transform_output(self, y):
        return (y - self.output_min) / self.output_range

    def inverse_transform_output(self, y_norm):
        return y_norm * self.output_range + self.output_min

    def to_dict(self):
        return {
            'input_min': self.input_min.tolist(),
            'input_max': self.input_max.tolist(),
            'output_min': self.output_min.tolist(),
            'output_max': self.output_max.tolist(),
            'input_range': self.input_range.tolist(),
            'output_range': self.output_range.tolist(),
        }

    @classmethod
    def from_dict(cls, d):
        norm = cls()
        norm.input_min = np.array(d['input_min'])
        norm.input_max = np.array(d['input_max'])
        norm.output_min = np.array(d['output_min'])
        norm.output_max = np.array(d['output_max'])
        norm.input_range = np.array(d['input_range'])
        norm.output_range = np.array(d['output_range'])
        return norm

    def save(self, path):
        with open(path, 'w') as f:
            json.dump(self.to_dict(), f, indent=2)

    @classmethod
    def load(cls, path):
        with open(path) as f:
            return cls.from_dict(json.load(f))

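# Illustrative round trip (a sketch: X and y are numpy arrays shaped
# [n_samples, 3] and [n_samples, 6]):
#
#     norm = DataNormalizer()
#     norm.fit(X, y)
#     X_n = norm.transform_input(X)                  # per-feature [0, 1]
#     y_n = norm.transform_output(y)
#     y_back = norm.inverse_transform_output(y_n)    # recovers y
#     norm.save('normalizer.json')                   # DataNormalizer.load(...) restores it
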
def get_model_config():
    """Return the default model and training configuration."""
    return {
        'input_dim': 3,
        'hidden_dims': [64, 128, 128, 64],
        'output_dim': 6,
        'dropout': 0.1,
        'learning_rate': 1e-3,
        'weight_decay': 1e-4,
        'epochs': 2000,
        'batch_size': 64,
        'physics_lambda': 0.01,
        'scheduler_patience': 100,
        'scheduler_factor': 0.5,
        'input_features': ['Ha', 'phi', 'u_in'],
        'output_features': ['T_max', 'Nu', 'S_gen', 'delta_T', 'BL_suppression', 'k_ratio'],
        'input_ranges': {
            'Ha': [0, 60],
            'phi': [0.01, 0.05],
            'u_in': [0.05, 0.30],
        },
    }
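
# Minimal, self-contained smoke test (illustrative: the synthetic inputs and
# targets are random placeholders, not simulation data). It checks that the
# model, loss, and normalizer compose with the expected shapes.
if __name__ == "__main__":
    rng = np.random.default_rng(0)

    # Raw inputs sampled inside the configured ranges; random raw targets
    X = rng.uniform([0, 0.01, 0.05], [60, 0.05, 0.30], size=(32, 3))
    y = rng.uniform(0.0, 1.0, size=(32, 6))

    norm = DataNormalizer()
    norm.fit(X, y)
    X_t = torch.tensor(norm.transform_input(X), dtype=torch.float32)
    y_t = torch.tensor(norm.transform_output(y), dtype=torch.float32)

    cfg = get_model_config()
    model = ThermalSurrogateModel(
        input_dim=cfg['input_dim'],
        hidden_dims=cfg['hidden_dims'],
        output_dim=cfg['output_dim'],
        dropout=cfg['dropout'],
    )
    criterion = PhysicsLoss(lambda_physics=cfg['physics_lambda'])

    pred = model(X_t)
    total, data_l, phys_l = criterion(pred, y_t, inputs_normalized=X_t)
    print(f"pred: {tuple(pred.shape)}, total={total.item():.4f}, "
          f"data={data_l.item():.4f}, physics={phys_l.item():.4f}")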