ml_model.hpp
/*
  Copyright (c) 2016 Robert W. Rose
  Copyright (c) 2018 Paul Maevskikh
  Copyright (c) 2024 NORCE
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef ML_MODEL_H_
#define ML_MODEL_H_

#include <fmt/format.h>

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <memory>
#include <numeric>
#include <opm/common/ErrorMacros.hpp>
#include <string>
#include <vector>

namespace Opm
{

namespace ML
{

// Mathematical tensor (max 4d)
// ---------------------
template <class T>
class Tensor
{
public:
    Tensor()
    {
    }

    explicit Tensor(int i)
    {
        resizeI<std::vector<int>>({i});
    }

    Tensor(int i, int j)
    {
        resizeI<std::vector<int>>({i, j});
    }

    Tensor(int i, int j, int k)
    {
        resizeI<std::vector<int>>({i, j, k});
    }

    Tensor(int i, int j, int k, int l)
    {
        resizeI<std::vector<int>>({i, j, k, l});
    }

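    // Sets the tensor shape (rank 1 to 4) and allocates storage for the
    // product of the dimensions.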
    template <typename Sizes>
    void resizeI(const Sizes& sizes)
    {
        OPM_ERROR_IF(sizes.empty() || sizes.size() > 4, "Invalid tensor rank");

        dims_.assign(sizes.begin(), sizes.end());
        data_.resize(std::accumulate(dims_.begin(), dims_.end(), 1, std::multiplies<int>()));
    }

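    // Collapses the tensor to rank 1, preserving the total element count.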
    void flatten()
    {
        OPM_ERROR_IF(dims_.empty(), "Invalid tensor");

        int elements = dims_[0];
        for (unsigned int i = 1; i < dims_.size(); i++) {
            elements *= dims_[i];
        }
        dims_ = {elements};
    }

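    // Bounds-checked element access; the number of indices must match the
    // tensor rank.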
    T& operator()(int i)
    {
        OPM_ERROR_IF(dims_.size() != 1, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));

        return data_[i];
    }

    T& operator()(int i, int j)
    {
        OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));

        return data_[dims_[1] * i + j];
    }

    const T& operator()(int i, int j) const
    {
        OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));

        return data_[dims_[1] * i + j];
    }

    T& operator()(int i, int j, int k)
    {
        OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));
        OPM_ERROR_IF(!(k >= 0 && k < dims_[2]),
                     fmt::format("Invalid k: {} max: {}", k, dims_[2]));

        return data_[dims_[2] * (dims_[1] * i + j) + k];
    }

    const T& operator()(int i, int j, int k) const
    {
        OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));
        OPM_ERROR_IF(!(k >= 0 && k < dims_[2]),
                     fmt::format("Invalid k: {} max: {}", k, dims_[2]));

        return data_[dims_[2] * (dims_[1] * i + j) + k];
    }

    T& operator()(int i, int j, int k, int l)
    {
        OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));
        OPM_ERROR_IF(!(k >= 0 && k < dims_[2]),
                     fmt::format("Invalid k: {} max: {}", k, dims_[2]));
        OPM_ERROR_IF(!(l >= 0 && l < dims_[3]),
                     fmt::format("Invalid l: {} max: {}", l, dims_[3]));

        return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
    }

    const T& operator()(int i, int j, int k, int l) const
    {
        OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
        OPM_ERROR_IF(!(i >= 0 && i < dims_[0]),
                     fmt::format("Invalid i: {} max: {}", i, dims_[0]));
        OPM_ERROR_IF(!(j >= 0 && j < dims_[1]),
                     fmt::format("Invalid j: {} max: {}", j, dims_[1]));
        OPM_ERROR_IF(!(k >= 0 && k < dims_[2]),
                     fmt::format("Invalid k: {} max: {}", k, dims_[2]));
        OPM_ERROR_IF(!(l >= 0 && l < dims_[3]),
                     fmt::format("Invalid l: {} max: {}", l, dims_[3]));

        return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
    }

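    // Sets every element to the given value.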
    void fill(const T& value)
    {
        std::fill(data_.begin(), data_.end(), value);
    }

    // Element-wise tensor addition
    Tensor operator+(const Tensor& other)
    {
        OPM_ERROR_IF(dims_ != other.dims_,
                     "Cannot add tensors with different dimensions");

        Tensor result;
        result.dims_ = dims_;
        result.data_.resize(data_.size());

        std::transform(data_.begin(),
                       data_.end(),
                       other.data_.begin(),
                       result.data_.begin(),
                       [](const T& x, const T& y) { return x + y; });

        return result;
    }

    // Element-wise tensor multiplication
    Tensor multiply(const Tensor& other)
    {
        OPM_ERROR_IF(dims_ != other.dims_,
                     "Cannot multiply elements with different dimensions");

        Tensor result;
        result.dims_ = dims_;
        result.data_.resize(data_.size());

        std::transform(data_.begin(),
                       data_.end(),
                       other.data_.begin(),
                       result.data_.begin(),
                       [](const T& x, const T& y) { return x * y; });

        return result;
    }

    // Tensor dot product for 2d tensors
    Tensor dot(const Tensor& other)
    {
        OPM_ERROR_IF(dims_.size() != 2, "Invalid tensor dimensions");
        OPM_ERROR_IF(other.dims_.size() != 2, "Invalid tensor dimensions");
        OPM_ERROR_IF(dims_[1] != other.dims_[0],
                     "Cannot multiply with different inner dimensions");

        Tensor tmp(dims_[0], other.dims_[1]);

        for (int i = 0; i < dims_[0]; i++) {
            for (int j = 0; j < other.dims_[1]; j++) {
                for (int k = 0; k < dims_[1]; k++) {
                    tmp(i, j) += (*this)(i, k) * other(k, j);
                }
            }
        }

        return tmp;
    }

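    // Swaps shape and storage with another tensor.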
    void swap(Tensor& other)
    {
        dims_.swap(other.dims_);
        data_.swap(other.data_);
    }

    std::vector<int> dims_;
    std::vector<T> data_;
};

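// Example (sketch): exercising the Tensor class above with a 2d dot
// product. The shapes and values here are illustrative only.
//
//   Tensor<double> a(2, 3);
//   Tensor<double> b(3, 2);
//   a.fill(1.0);
//   b.fill(2.0);
//   Tensor<double> c = a.dot(b); // every c(i, j) is 1*2 summed over 3 terms = 6.0
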
// NN layer base class
// ---------------------
template <class Evaluation>
class NNLayer
{
public:
    NNLayer()
    {
    }

    virtual ~NNLayer()
    {
    }

    // Loads the layer parameters from a trained model file; returns true on success
    virtual bool loadLayer(std::ifstream& file) = 0;
    // Applies the layer to the input tensor
    virtual bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) = 0;
};

// Activation layer
// ---------------------
template <class Evaluation>
class NNLayerActivation : public NNLayer<Evaluation>
{
public:
    enum class ActivationType {
        kLinear = 1,
        kRelu = 2,
        kSoftPlus = 3,
        kSigmoid = 4,
        kTanh = 5,
        kHardSigmoid = 6
    };

    NNLayerActivation()
        : activation_type_(ActivationType::kLinear)
    {
    }

    bool loadLayer(std::ifstream& file) override;

    bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

private:
    ActivationType activation_type_;
};

// Scaling layer
// ---------------------
template <class Evaluation>
class NNLayerScaling : public NNLayer<Evaluation>
{
public:
    NNLayerScaling()
        : data_min(1.0f)
        , data_max(1.0f)
        , feat_inf(1.0f)
        , feat_sup(1.0f)
    {
    }

    bool loadLayer(std::ifstream& file) override;

    bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

private:
    Tensor<float> weights_;
    Tensor<float> biases_;
    float data_min;
    float data_max;
    float feat_inf;
    float feat_sup;
};

// Un-scaling layer
// ---------------------
template <class Evaluation>
class NNLayerUnScaling : public NNLayer<Evaluation>
{
public:
    NNLayerUnScaling()
        : data_min(1.0f)
        , data_max(1.0f)
        , feat_inf(1.0f)
        , feat_sup(1.0f)
    {
    }

    bool loadLayer(std::ifstream& file) override;

    bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

private:
    Tensor<float> weights_;
    Tensor<float> biases_;
    float data_min;
    float data_max;
    float feat_inf;
    float feat_sup;
};

// Dense (fully connected) layer
// ---------------------
template <class Evaluation>
class NNLayerDense : public NNLayer<Evaluation>
{
public:
    bool loadLayer(std::ifstream& file) override;

    bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

private:
    Tensor<float> weights_;
    Tensor<float> biases_;

    NNLayerActivation<Evaluation> activation_;
};

// Embedding layer
// ---------------------
template <class Evaluation>
class NNLayerEmbedding : public NNLayer<Evaluation>
{
public:
    bool loadLayer(std::ifstream& file) override;

    bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

private:
    Tensor<float> weights_;
};

// NN model
// ---------------------
template <class Evaluation>
class NNModel
{
public:
    enum class LayerType { kScaling = 1, kUnScaling = 2, kDense = 3, kActivation = 4 };

    virtual ~NNModel() = default;

    // Loads models (.model files) generated by Kerasify
    virtual bool loadModel(const std::string& filename);

    virtual bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out);

private:
    std::vector<std::unique_ptr<NNLayer<Evaluation>>> layers_;
};

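// Example (sketch): loading a Kerasify-exported model and evaluating it.
// The file name "model.model" and the input size are illustrative only.
//
//   NNModel<double> model;
//   if (model.loadModel("model.model")) {
//       Tensor<double> in(3);
//       in.fill(0.5);
//       Tensor<double> out;
//       model.apply(in, out);
//   }
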
// Timer
// ---------------------
class NNTimer
{
public:
    void start()
    {
        start_ = std::chrono::high_resolution_clock::now();
    }

    float stop()
    {
        std::chrono::time_point<std::chrono::high_resolution_clock> now
            = std::chrono::high_resolution_clock::now();

        std::chrono::duration<double> diff = now - start_;

        return static_cast<float>(diff.count());
    }

private:
    std::chrono::time_point<std::chrono::high_resolution_clock> start_;
};
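
// Example (sketch): timing a model evaluation with the timer above
// (its name, NNTimer here, follows the NN* naming of the other classes).
//
//   NNTimer timer;
//   timer.start();
//   model.apply(in, out);
//   float seconds = timer.stop();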

} // namespace ML

} // namespace Opm

#endif // ML_MODEL_H_