fix: function stubs for compilation without AI surrogate

This commit is contained in:
straile 2024-10-11 12:12:38 +02:00
parent 84c86a85f5
commit f7d3a7ea65
2 changed files with 12 additions and 13 deletions

View File

@ -99,8 +99,9 @@ following available options:
- **POET_PREPROCESS_BENCHS**=*boolean* - enables the preprocessing of
predefined models/benchmarks. Defaults to *ON*.
- **USE_AI_SURROGATE**=*boolean* - includes the functions of the AI
surrogate model. This relies on the presence of a Python environment
where Keras is installed.
surrogate model. When active, CMake relies on `find_package()` to find
an implementation of `Threads` and a Python environment where Numpy
and Keras need to be installed. Defaults to _OFF_.
### Example: Build from scratch
@ -235,7 +236,10 @@ mpirun -n 4 ./poet --dht --dht-snaps=2 barite_het_rt.R barite_het.rds output
### Example: Preparing Environment and Running with AI surrogate
To run the AI surrogate, you need to install the R package `keras3`. The
To run the AI surrogate, you need to have Keras installed in your Python environment.
The implementation in POET is agnostic to the exact Keras version, but if you use a
pretrained model, the model file must match your Keras version. Using Keras 3 with
`.keras` model files is recommended.
compilation process of POET remains the same as shown above.
In the following code block, the installation process on the Turing Cluster is

View File

@ -14,13 +14,15 @@
#ifndef AI_FUNCTIONS_H
#define AI_FUNCTIONS_H
#include <string>
#include <vector>
// PhreeqC definition of pi clashes with Eigen macros so we have to temporarily undef it
#pragma push_macro("pi")
#undef pi
#include <Eigen/Dense>
#pragma pop_macro("pi")
#include <string>
namespace poet {
// Define an aligned allocator for std::vector
@ -48,8 +50,6 @@ int Python_Keras_load_model(std::string model_file_path);
std::vector<double> Python_Keras_predict(std::vector<std::vector<double>> x, int batch_size);
void Python_Keras_train(std::vector<std::vector<double>> x, std::vector<std::vector<double>> y, int batch_size);
int Python_Keras_training_thread(EigenModel* Eigen_model,
std::mutex* Eigen_model_mutex,
TrainingData* training_data_buffer,
@ -68,21 +68,16 @@ std::vector<double> Eigen_predict(const EigenModel& model, std::vector<std::vect
// Otherwise, define the necessary stubs
#else
// No-op stand-ins for the Python/Keras API so POET compiles when the AI
// surrogate is disabled (USE_AI_SURROGATE=OFF). Each stub mirrors the
// signature of the corresponding real function declared above.
inline void Python_Keras_setup(std::string functions_file_path){}
inline void Python_finalize(){}
// Real API returns an int status code; report 0 ("success") so callers that
// check the result still compile and proceed.
inline int Python_Keras_load_model(std::string model_file_path){return 0;}
// Prediction is unavailable without the surrogate: return an empty vector.
inline std::vector<double> Python_Keras_predict(std::vector<std::vector<double>>, int){return {};}
inline void Python_Keras_train(std::vector<std::vector<double>>, std::vector<std::vector<double>>, int){}
inline int Python_Keras_training_thread(EigenModel*, std::mutex*,
                                        TrainingData*, std::mutex*,
                                        std::condition_variable*, bool*,
                                        int, int, int, bool){return 0;}
inline void Python_Keras_set_weights_as_Eigen(EigenModel&){}
inline EigenModel transform_weights(const std::vector<std::vector<std::vector<double>>>){return {};}
inline std::vector<std::vector<std::vector<double>>> Python_Keras_get_weights(){return {};}
inline std::vector<double> Eigen_predict(const EigenModel&, std::vector<std::vector<double>>, int){return {};}
#endif
} // namespace poet
#endif // AI_FUNCTIONS_H