Compare commits

...

261 Commits

Author SHA1 Message Date
Marco De Lucia
9d16b2e92d removed .ai dir from this branch 2025-11-14 10:58:43 +01:00
rastogi
aa76830c5e Stop tracking bin/dolo/ and update .gitignore 2025-11-13 16:00:47 +01:00
rastogi
51d78dbb85 updated .gitignore 2025-11-12 10:25:58 +01:00
rastogi
28bd24f9f6 Store per-cell MAPE/RRMSE and add stabilization interval 2025-11-12 10:20:28 +01:00
rastogi
6fbeaed12d Fix cell_ID access in DHT/interpolation and restructure control cell metric computation to use row-major layout 2025-11-07 15:55:26 +01:00
rastogi
6c5b86cccc feat: Add control logic for surrogate validation with selective PHREEQC re-execution, error metrics (MAPE/RRMSE), and automatic checkpoint/rollback 2025-11-06 20:55:02 +01:00
rastogi
1b2d942960 feat: bcast control_cell_ids to workers 2025-11-02 22:26:14 +01:00
rastogi
7c97f29fa6 Added ctrl_cell_ids 2025-10-30 21:36:03 +01:00
rastogi
8c388e3c6c Restructuring prototyp 3 2025-10-30 16:15:27 +01:00
rastogi
8a247082aa Add .gitignore 2025-10-30 16:14:22 +01:00
rastogi
104301dd5f Merge branch 'control-loop' of git.gfz-potsdam.de:naaice/poet into control-loop 2025-10-02 13:21:57 +02:00
rastogi
7467bbe50a feat(control): dynamic prototype, penalty_iteration, error while disabling surrogate fixed 2025-10-02 13:20:53 +02:00
Max Lübke
1af661527d feat(grid): enable cell_ID integration in chemistry data flow 2025-09-25 15:08:13 +02:00
rastogi
c10d35fabe rollback implemented, triggered when MAPE exceeds epsilon 2025-09-12 09:47:00 +02:00
Max Lübke
279a975359 cleanup computeStats() 2025-09-04 14:20:08 +02:00
Max Lübke
fbdec9f015 fix missing handling of different storage locations of work packages 2025-09-04 14:19:54 +02:00
Max Lübke
0d22cc2056 formatting MasterFunctions.cpp 2025-09-04 13:54:43 +02:00
Max Lübke
35f95430a8 stuff in computeStats 2025-09-02 13:05:47 +02:00
Max Lübke
d9026b1017 Add fgcs dolo benchmark 2025-09-02 12:04:38 +02:00
Max Lübke
d241db00b8 rename control work package and substitute interpolated work package 2025-09-02 11:03:33 +02:00
rastogi
1e06113d64 Initial control-loop setup. Unit tests for MAPE and RMSE added.
Note: computeStats function is not working correctly yet.
2025-09-01 12:54:24 +02:00
rastogi
0cc0c9cdf6 computeStats not working correctly, Unit Tests added 2025-09-01 12:39:01 +02:00
Max Lübke
12d39ecb9b feat: Implement checkpointing
Co-authored-by: hmars-t <hmars-t@users.noreply.github.com>
2025-08-01 11:27:25 +02:00
rastogi
3002a798c7 Initial commit 2025-08-01 11:09:59 +02:00
Max Lübke
b80ee926da Merge branch 'ml/add-build-type-to-readme' into 'main'
Add CMake Build Type to Readme

See merge request naaice/poet!53
2025-05-20 14:29:00 +02:00
Max Lübke
9ab13ca547 Add CMake Build Type to Readme 2025-05-20 14:28:45 +02:00
Marco De Lucia
d169c5583b Merge branch '17-new-barite-benchmarks-for-fgcs-journal' into 'main'
Resolve "New barite benchmarks for FGCS journal"

Closes #17

See merge request naaice/poet!44
2025-05-19 12:50:30 +02:00
Max Lübke
fe8f2f1edf fix: Correct boolean conversion in DHT hook call 2025-03-12 10:35:23 +01:00
Hannes Martin Signer
36776a1863 Merge branch 'ml/add-tug-solver-option' into 'main'
Add option to toggle tug numerical solver approach

See merge request naaice/poet!50
2025-01-29 16:17:52 +01:00
Max Luebke
ae6066414b Add option to toggle tug numerical solver approach 2025-01-29 15:53:32 +01:00
Max Lübke
caffae2d54 refactor: clean up CMakeLists.txt by removing unnecessary target_sources indentation 2025-01-09 12:18:13 +01:00
Max Lübke
e603a5a995 feat: add index tracking and flushing functionality to DHT 2025-01-09 12:18:05 +01:00
Max Lübke
375a5cb38b Merge branch '17-new-barite-benchmarks-for-fgcs-journal' of git.gfz-potsdam.de:naaice/poet into 17-new-barite-benchmarks-for-fgcs-journal 2024-12-20 10:12:51 +01:00
Max Lübke
912ab21802 [wip] add input validation for work_package in DHT_Wrapper and InterpolationModule 2024-12-20 10:12:19 +01:00
Max Lübke
db2f6cc71e fix: update grid dimensions and refine species definitions in barite_fgcs_2.R 2024-12-20 10:08:07 +01:00
Marco De Lucia
ed27e0d4cc Fancier benchmark for fgcs paper 2024-12-20 10:08:07 +01:00
Max Lübke
75ca4bc74e Merge branch 'hotfix-qs-option' into 'main'
fix: remove duplicate flag for saving output as .qs file in parseInitValues

See merge request naaice/poet!49
2024-12-20 10:07:21 +01:00
Max Lübke
aa2cb259a5 fix: remove duplicate flag for saving output as .qs file in parseInitValues 2024-12-20 10:05:49 +01:00
Marco De Lucia
894cc256b2 Fancier benchmark for fgcs paper 2024-12-20 09:24:19 +01:00
Max Lübke
efaac15a79 Merge branch 'ml/fix-interpolation' into 'main'
Fix Interpolation and add optional features to POET benchmark description

See merge request naaice/poet!48
2024-12-20 09:23:44 +01:00
Max Lübke
c6910eb96e Merge branch 'main' into 'ml/fix-interpolation'
# Conflicts:
#   src/initializer.cpp
#   src/poet.cpp
#   src/poet.hpp.in
2024-12-20 09:22:55 +01:00
Max Lübke
b9e18a1059 fix: initialize default values for RuntimeParameters in poet.hpp.in 2024-12-20 09:21:18 +01:00
Max Lübke
cfea048fdb fix: update dht_species values for barite and celestite in benchmark scripts 2024-12-20 09:21:18 +01:00
Max Lübke
f0eacc0f40 Revert "feat: make output of H(0) and O(0) optional"
This reverts commit bdb5ce944fd001cb36c8369d8c792f2f4a54c7be.
2024-12-20 09:21:18 +01:00
Max Lübke
af5d2a7a70 fix: handle zero distance case in inverseDistanceWeighting to prevent division by zero 2024-12-20 09:21:18 +01:00
Max Lübke
e1d1a577bc feat: add with_h0_o0 and with_redox parameters to ChemistryModule and related classes 2024-12-20 09:21:18 +01:00
Max Luebke
b4c437e614 feat: add support for redox in PhreeqcMatrix initialization 2024-12-20 09:21:18 +01:00
Max Lübke
0e683d98ce feat: enhance LookupKey with const isnan method and comparison operator 2024-12-20 09:21:18 +01:00
Max Lübke
7ba9ddfa83 refactor: streamline WorkPackage constructor and improve DHT_Wrapper::fillDHT logic 2024-12-20 09:21:17 +01:00
Max Luebke
6a889b5364 feat: add dht_snaps and dht_out_dir parameters to ChemistryModule and main function 2024-12-20 09:21:17 +01:00
Max Luebke
034ec5d9f7 fix: initialize includeH0O0 to false in command line options 2024-12-20 09:21:17 +01:00
Max Luebke
50e856b28b feat: make output of H(0) and O(0) optional 2024-12-20 09:21:17 +01:00
Max Lübke
cd53f43bd2 feat: add has_het_ids parameter to DHT initialization and related functions 2024-12-20 09:21:17 +01:00
Max Lübke
5f56ce9e3f fix: use const reference for to_calc in InterpolationModule to improve performance 2024-12-20 09:21:17 +01:00
Max Lübke
1a680d8ca6 fix: remove double initialization of dht_enabled in ChemistryModule 2024-12-20 09:21:17 +01:00
Max Lübke
1b4691c172 feat: implement caching for interpolation calculations and add NaN handling 2024-12-20 09:21:16 +01:00
Max Luebke
07ac2bfd8d [wip] fix: add base_totals to SurrogateSetup 2024-12-20 09:21:16 +01:00
Marco De Lucia
3a164ed716 reverting <format> since gcc < 13 does not support it 2024-12-20 09:21:16 +01:00
Marco De Lucia
c4780fe9af Update references to .qs2 in README 2024-12-20 09:21:16 +01:00
Marco De Lucia
3d0a9041b5 Update README: qs2 as default output format, gfz.de everywhere 2024-12-20 09:21:16 +01:00
Marco De Lucia
54d6b6bbf2 Less and more informative stdout messages 2024-12-20 09:21:16 +01:00
Marco De Lucia
5493cf88a9 Added qs2 as new default format 2024-12-20 09:21:15 +01:00
Max Lübke
ff7cf31e10 Merge branch 'ml/gitlab-ci' into 'main'
Refactor CI pipeline by removing the build stage and adjusting test job dependencies

See merge request naaice/poet!47
2024-12-20 09:06:18 +01:00
Max Lübke
101f00f452 Refactor CI pipeline by removing the build stage and adjusting test job dependencies 2024-12-20 09:04:30 +01:00
Max Lübke
c0d3485edb Merge branch 'ml/fix-license' into 'main'
Revert "No code changes made."

See merge request naaice/poet!46
2024-12-20 09:00:27 +01:00
Max Lübke
426429f8a9 Update LICENSE to include updated copyright notice and additional guidelines for program distribution 2024-12-20 08:58:40 +01:00
Max Lübke
6714082646 Revert "No code changes made."
This reverts commit 8b35513b8824a31c21ab53c27ca2f89ce92ae36c.
2024-12-20 08:57:51 +01:00
Max Lübke
ea0c21c9da Merge branch 'ml/fix-license' into 'main'
Fix license parsing error of GitLab?

See merge request naaice/poet!45
2024-12-20 08:55:43 +01:00
Max Lübke
8b35513b88 No code changes made. 2024-12-20 08:46:51 +01:00
Max Lübke
9d7fa5457d Merge branch 'mdl/fgcsbench' into 'main'
Mdl/fgcsbench

See merge request naaice/poet!43
2024-12-13 12:19:57 +01:00
Marco De Lucia
f1b166145e reverting <format> since gcc < 13 does not support it 2024-12-13 12:14:55 +01:00
Marco De Lucia
fbd4739360 Update references to .qs2 in README 2024-12-13 12:14:55 +01:00
Marco De Lucia
ce5db73604 Update README: qs2 as default output format, gfz.de everywhere 2024-12-13 12:14:55 +01:00
Marco De Lucia
a714211560 Less and more informative stdout messages 2024-12-13 12:14:55 +01:00
Marco De Lucia
18725bf87d Added qs2 as new default format 2024-12-13 12:14:55 +01:00
Hannes Martin Signer
cc439e7b5e Merge branch 'update-scheme' into 'main'
update poet scheme

See merge request naaice/poet!42
2024-12-10 15:03:54 +01:00
Hannes Martin Signer
4d20b7c5bd Update 2 files
- /docs/POET_scheme.svg
- /docs/POET.drawio
2024-12-10 15:02:45 +01:00
Max Lübke
4737330786 Merge branch 'update-poet-scheme' into 'main'
update POET scheme

See merge request naaice/poet!41
2024-12-10 11:15:48 +01:00
Hannes Martin Signer
e40e6cb82f update POET scheme 2024-12-10 11:14:01 +01:00
Max Lübke
e920ab2811 Merge branch 'ml/iphreeqc-v0.3' into 'main'
Update IPhreeqc Submodule to v0.3

See merge request naaice/poet!40
2024-12-04 11:35:27 +01:00
Max Lübke
4d63544092 refactor: Replace PhreeqcEngine instances with PhreeqcRunner for improved chemistry processing 2024-12-04 10:34:13 +00:00
Max Lübke
c1ed926198 build: Enhance Dockerfile for improved environment setup and dependency management 2024-12-04 08:44:55 +00:00
Max Lübke
e31019e957 Merge branch 'ml/misc' into 'main'
Various changes to build files (CMake & devcontainer)

See merge request naaice/poet!38
2024-11-07 20:57:45 +01:00
Max Lübke
0ea5d70423 build: Update Dockerfile and devcontainer.json for enhanced environment setup 2024-11-07 19:56:04 +00:00
Max Lübke
c094aeaf39 build: Refactor R runtime detection in CMake to ensure required parameters are set 2024-11-07 19:56:04 +00:00
Max Lübke
909308b000 Merge branch 'ml/new-iphreeqc-api' into 'main'
Update to IPhreeqc v3.8.2

See merge request naaice/poet!37
2024-11-07 14:23:58 +01:00
Max Lübke
1f7b9f84cf fix: Update getSolutionNames call to remove unnecessary argument 2024-11-07 14:22:58 +01:00
Max Lübke
9650e59115 Update subproject commit in ext/iphreeqc 2024-11-07 14:22:51 +01:00
Marco De Lucia
85aeb04bc4 Merge branch 'delucia-readme-patch-68839' into 'main'
Fix: README.md

See merge request naaice/poet!36
2024-09-26 11:47:46 +02:00
Marco De Lucia
0573001758 Update README.md 2024-09-26 11:45:07 +02:00
Marco De Lucia
d603589375 Merge branch 'mdl/qs-rebased' into 'main'
rebased mdl/enable-qs to merge with main

See merge request naaice/poet!34
2024-09-16 13:07:43 +02:00
Max Luebke
57d05f5c76 fix: Corrected log10 calculation in master_iteration_end function 2024-09-16 12:05:19 +02:00
Max Luebke
66fb9e601c feat: use CLI11 as argument parser
feat: improve poet_initializer
2024-09-16 11:57:00 +02:00
Marco De Lucia
faf0ed20c3 fixed damn source_R 2024-09-12 16:15:21 +02:00
Marco De Lucia
8e7c5b1d60 cosmetic fixes in DiffusionInit.cpp 2024-09-12 14:57:11 +02:00
Marco De Lucia
6a8edd02bf fix: removed Time-stamp lines from README and CMake/ 2024-09-12 14:56:42 +02:00
Marco De Lucia
742b0babff fix: removed all Time-stamp lines from src/ 2024-09-12 14:56:09 +02:00
Marco De Lucia
7166cef64e fixing stuff and adding comments 2024-09-12 14:54:05 +02:00
Marco De Lucia
ee40abf68b Fixes in README and poet.cpp 2024-09-12 14:53:58 +02:00
Marco De Lucia
2d4f4021ad Update README.md 2024-09-12 14:51:44 +02:00
Max Lübke
76fc5bc0b4 refactor: Rework deferred R function evaluation
applied commit
fix: Unique pointer behaviour of `global_rt_setup` was messed up
2024-09-12 14:48:05 +02:00
Marco De Lucia
e72272764d fixed initializer. Format is given by extension in the -o argument 2024-09-12 14:42:59 +02:00
Marco De Lucia
5606a718db feat: fast serialization/storage using qs package via --qs flag
rebasing/merging
2024-09-12 14:42:24 +02:00
Marco De Lucia
d0c912e078 cosmetic fixes in DiffusionInit.cpp 2024-09-12 12:37:49 +02:00
Marco De Lucia
2e1ea7f9c5 fix: removed Time-stamp lines from README and CMake/
Solve conflict
2024-09-12 12:37:28 +02:00
Marco De Lucia
4be947e5f2 fix: removed all Time-stamp lines from src/ 2024-09-12 12:36:40 +02:00
Marco De Lucia
c39d40dc45 fixing stuff and adding comments 2024-09-12 12:36:40 +02:00
Marco De Lucia
f15eb084d0 Switched default bench init storage to qs 2024-09-12 12:36:40 +02:00
Marco De Lucia
2e115c865b Fixing rebase conflicts 2024-09-12 12:36:11 +02:00
Marco De Lucia
e7c0f6cc49 Update README.md 2024-09-12 11:40:03 +02:00
Max Lübke
eee1f0d689 refactor: Rework deferred R function evaluation
fix: Unique pointer behaviour of `global_rt_setup` was messed up
2024-09-12 11:40:03 +02:00
Marco De Lucia
d35a9a6d95 fixed initializer. Format is given by extension in the -o argument 2024-09-12 11:40:03 +02:00
Marco De Lucia
4090c0a78f feat: fast serialization/storage using qs package via --qs flag 2024-09-12 11:39:44 +02:00
Max Lübke
1210bdf487 Merge branch 'ml/chore' into 'main'
chore: Update README and cleanup

See merge request naaice/poet!33
2024-08-29 14:26:13 +02:00
Max Lübke
9119504dcb chore: remove preprocessed file from project structure 2024-08-29 14:25:37 +02:00
Max Lübke
23b4182a97 chore: Remove unnecessary code in FindRRuntime.cmake 2024-08-29 14:25:37 +02:00
Max Lübke
ee92c75330 doc: Update R package installation process for AI surrogate 2024-08-29 14:25:37 +02:00
Max Lübke
3d9e6ae13d Merge branch 'ml/fix-dht' into 'main'
fix: Error during DHT usage

See merge request naaice/poet!32
2024-08-29 08:47:45 +02:00
Max Lübke
1f9ffe999f Update subproject commit reference 2024-08-29 08:35:17 +02:00
Max Lübke
e25ebfffdb fix: distribute species names across all processes 2024-08-29 08:35:11 +02:00
hans
b125016dab Fix: merge issues 2024-06-05 16:43:33 +02:00
hans
c2ee1a7f04 Merge branch 'ai-surrogate-v03-temp' into ai_surrogate_merge 2024-06-05 16:02:30 +02:00
hans
df6127fbda Merge branch 'origin/ai-surrogate-v03-temp-mdl' into ai_surrogate_merge 2024-06-05 15:59:00 +02:00
hans
efea0d5c19 fix: reserve vs resize in Worker 2024-06-05 15:54:19 +02:00
hans
692c47781e fix: wp_start_index in Worker 2024-05-31 11:45:19 +02:00
hans
dad82f22ba fix: wp_start_index in Worker 2024-05-31 11:26:50 +02:00
Marco De Lucia
7f522157d1 MDL: AI model seems correctly updated/stored 2024-05-30 13:37:28 +02:00
hans
330fcc897c docs: updated graphic 2024-05-30 13:30:43 +02:00
hans
e8dabdcfe2 docs: updated graphic 2024-05-30 13:30:10 +02:00
hans
f452ee3d81 docs: updated graphic 2024-05-30 13:28:48 +02:00
Hans Straile
9bf3c058df Update 0230720_Scheme_POET_en.svg 2024-05-30 13:25:15 +02:00
hans
4b07880a06 docs: updated docs for v03 2024-05-30 13:20:50 +02:00
Marco De Lucia
b974b96d27 MDL: added barite_50ai bench 2024-05-30 11:37:31 +02:00
Marco De Lucia
d00369def7 MDL: some fixes and some more output to make AI run 2024-05-30 11:32:08 +02:00
hans
99d0b8c70d feat: Add AI Surrogate functions to V.03 2024-05-27 15:22:29 +02:00
hans
a55a7ae095 feat: Add AI Surrogate functions to V.03 2024-05-27 09:09:01 +02:00
Max Lübke
e3633b4485 Merge branch 'ml/fix-chemistry' into 'main'
Fix chemistry

See merge request naaice/poet!30
2024-05-07 12:30:13 +02:00
Max Lübke
365017d811 chore: Update subproject commit in ext/iphreeqc 2024-05-07 09:52:31 +00:00
Max Lübke
282282e8b6 fix: Correct logic for updating output values in WorkerRunWorkPackage 2024-05-07 08:34:18 +00:00
Max Lübke
c660d5f117 Merge branch 'ml/build' into 'main'
Update CMakeLists.txt to conditionally add the bench subdirectory based on the...

See merge request naaice/poet!29
2024-05-06 13:27:27 +02:00
Max Lübke
9ac57682f1 Update CMakeLists.txt to conditionally add the bench subdirectory based on the POET_PREPROCESS_BENCHS option 2024-05-06 11:26:13 +00:00
Max Lübke
9addc002a2 Merge branch 'ml/ci' into 'main'
Refactor CI/CD pipeline

See merge request naaice/poet!28
2024-05-06 13:17:40 +02:00
Max Lübke
41fbd44c52 Refactor CI/CD pipeline 2024-05-06 13:17:39 +02:00
Max Lübke
8cab722ae3 Merge branch 'ml/surfex' into 'main'
Enable Surfex/Exchange reactants using IPhreeqc/POET API

See merge request naaice/poet!27
2024-05-06 12:11:50 +02:00
Max Lübke
0992143be5 Update Readme 2024-05-06 10:09:28 +00:00
Max Lübke
a12ac2c3d5 Add EGU debug model 2024-05-06 10:09:28 +00:00
Max Lübke
2b69da0c5d BREAKING CHANGE: Enable Surface/Exchange using new API of PhreeqcEngine 2024-05-06 10:01:57 +00:00
Max Luebke
e7d9afd971 Fix build process to only produce benchmarks when needed 2024-04-15 09:10:11 +00:00
Max Lübke
c101564ee8 Fix grid size assignment in GridInit.cpp 2024-04-12 12:35:43 +00:00
Max Lübke
546f6fadbe Refactor R_lib/kin_r_library.R to use setup$maxiter instead of iter for calculating max digits 2024-04-10 10:24:00 +00:00
Max Lübke
47a6a14100 Remove doctest submodule 2024-04-10 10:24:00 +00:00
Max Lübke
b3fd383674 Fix diffusion and chem field update order in RunMasterLoop 2024-04-10 10:24:00 +00:00
Max Lübke
32f7f5af70 Refactor process to output as DataFrames 2024-04-10 10:24:00 +00:00
Max Lübke
721388ae12 Refactor R package dependencies in Dockerfile 2024-04-09 08:09:47 +00:00
Max Luebke
41d00fd079 Add minimal flag to importList function in InitialList 2024-04-08 20:47:38 +00:00
Max Luebke
830e139122 Refactor Field.cpp to use Rcpp DataFrame for conversion to SEXP 2024-04-08 20:31:36 +00:00
Max Luebke
5182b6aa20 Refactor build file generation 2024-04-08 20:31:36 +00:00
Max Luebke
d53db0d5a5 Refactor benchmark files to current POET input expectations 2024-04-08 20:31:36 +00:00
Max Luebke
d5e4b03339 Refactor command line argument handling and improve script sourcing in initializer.cpp 2024-04-08 12:49:58 +00:00
Max Luebke
89cc239583 Refactor initializer.cpp to handle command line arguments and improve script sourcing 2024-04-08 12:02:47 +00:00
Max Luebke
fb62fd5a7f Refactor code for grid creation and result storage 2024-04-08 11:27:06 +00:00
Max Luebke
05d6170943 Fix inner_boundaries handling in DiffusionModule and InitialList 2024-04-08 10:32:00 +00:00
Max Luebke
18fd280f99 Update minimum required CMake version to 3.14 2024-04-08 10:31:47 +00:00
Max Luebke
2c7fc089cc Refactor code according to static analyzers 2024-04-08 09:19:48 +00:00
Max Luebke
9a7b6f0f24 Add dolo_inner benchmark 2024-04-05 08:34:41 +00:00
Max Luebke
e57f7923dd Move old benchmark files into old subdir
Move new benchmark files from `het` folder
2024-04-05 08:33:26 +00:00
Max Luebke
07cf952cee Add inner_boundaries handling in DiffusionModule and InitialList 2024-04-05 08:31:32 +00:00
Max Luebke
2ebbb48e4b Update .gitignore to ignore .codechecker directory 2024-04-05 07:05:09 +00:00
Max Luebke
e891774d6c Refactor code for grid creation and result storage 2024-04-04 09:27:52 +00:00
Max Luebke
3add44ce25 Add functions for checking sign and negativity in dol.pqi and dolo_200.R 2024-04-04 09:27:27 +00:00
Max Luebke
9978016854 Add file extension replacement and print output filename 2024-04-03 21:19:39 +00:00
Max Luebke
8097723d8e Update Dockerfile with required dependencies and install R packages 2024-04-03 21:03:16 +00:00
Max Luebke
6d7666abbc Refactor R functions and how they are called 2024-04-03 21:03:10 +00:00
Max Lübke
9eeb1502e1 Update iterations and dt values in dolo_200_rt.R 2024-04-03 16:46:50 +02:00
Max Luebke
0cf6d65128 Add parallel grid creation function and update pqc_to_grid function 2024-04-03 14:25:04 +00:00
Max Luebke
dd5c14aa88 Fix initialization of transport_names in ChemistryInit and GridInit 2024-04-03 14:25:04 +00:00
Max Luebke
7655376f89 Update solution parameters in dol.pqi 2024-04-03 14:25:04 +00:00
Max Luebke
dc3502685c Add support for missing species in init grid, which are injected (by boundary condition) 2024-04-03 14:25:04 +00:00
Max Luebke
7be00983e5 Update iphreeqc subproject commit 2024-04-03 10:47:33 +00:00
Max Luebke
d389347ad3 Update chem.GetField() to chem.getField() in poet.cpp 2024-04-03 10:47:32 +00:00
Max Luebke
465a6595b0 Update runtime and test scripts 2024-04-03 10:46:54 +00:00
Max Lübke
9fb97aeda9 Update units in dol.pqi 2024-04-03 10:59:31 +02:00
Max Lübke
72079543db Update GIT_SUBMODULE_STRATEGY in .gitlab-ci.yml 2024-04-03 10:10:02 +02:00
Max Lübke
efc1bda554 Update Calcite and Dolomite parameters in dol.pqi 2024-04-03 10:08:37 +02:00
Max Lübke
5f0dc14802 Update out_save in dolo_200_rt.R 2024-04-03 10:08:02 +02:00
Max Luebke
4784fc3e12 Refactor to wrap everything in main function into scope, to ensure DHT is freed before MPI_FInalize 2024-04-02 20:47:32 +00:00
Max Luebke
b5dedb3422 Remove SimParams class 2024-04-02 20:38:23 +00:00
Max Luebke
9b38cd2dc1 Add check_sign_cal_dol_dht and fuzz_input_dht_keys functions 2024-04-02 20:35:28 +00:00
Max Luebke
e380e8fd19 Refactor WorkerRunWorkPackage to copy input instead of overwriting 2024-04-02 20:24:04 +00:00
Max Luebke
90287ab445 Refactor chemistry module and RHookFunction 2024-04-02 20:23:42 +00:00
Max Luebke
f2e8f6023d Update iteration end message in kin_r_library.R 2024-04-02 14:37:20 +00:00
Max Luebke
bda9993bcb Add profiling data for DHT and interpolation 2024-04-02 14:17:59 +00:00
Max Luebke
47aa43d1e1 Add DHT functionality, still need to be validated 2024-04-02 14:14:19 +00:00
Max Luebke
082a758f18 Add chemistry initialization to InitialList class 2024-04-02 12:06:55 +00:00
Max Luebke
bd4d7a38ca Add dolo_200_rt.R, dol.pqi, and dolo_200.R files for simulation setup 2024-04-02 11:41:38 +00:00
Max Luebke
fb725f0b50 Update dependencies and refactor code 2024-04-02 11:41:38 +00:00
Max Luebke
f184fb7962 Remove doctest submodule and update gitmodules 2024-04-02 10:33:56 +00:00
Max Luebke
20d0eb52ae Update CMakeLists.txt and include headers in test files 2024-04-02 09:25:18 +00:00
Max Luebke
53f68a2e56 Remove unnecessary includes and update function signature 2024-04-02 09:16:58 +00:00
Max Lübke
a345191d5a Update dependencies and refactor code 2024-03-28 14:23:20 +00:00
Max Lübke
38a631f0ab Add Basic heterogeneous diffusion functionality 2024-03-27 20:34:48 +00:00
Max Luebke
d3632156a5 Add total_grid_cells to ChemistryInit struct 2024-03-21 22:33:40 +00:00
Max Lübke
756988da31 Add support for importing and exporting initial grid in InitialList class 2024-03-21 21:36:09 +00:00
Max Lübke
94a4f71038 Add Init/CMakeLists.txt and ChemistryInit.cpp files, and update initializer.cpp and InitialList.hpp 2024-03-20 23:46:57 +00:00
Max Lübke
4e86e7f86d Update include statement and add CMakeLists.txt for DataStructures 2024-03-20 22:33:23 +00:00
Max Luebke
ce2f9e237f Update grid and diffusion setup in test.R 2024-03-19 22:03:22 +00:00
Max Lübke
bbeff06083 Add resolvePqcBound function and refactor parseBoundaries2D function 2024-03-19 15:37:29 +00:00
Max Lübke
64ec44034c Update alpha matrix parsing 2024-03-19 14:07:25 +00:00
Max Lübke
1d24e670e5 Refactor initialization code for diffusion and grid 2024-03-19 13:39:59 +00:00
Max Lübke
b6a5a7a6a2 Add file reading functionality and update script and database handling 2024-03-19 13:11:42 +00:00
Max Lübke
14411ac319 Refactor grid initialization and update member access in InitialList 2024-03-19 12:32:16 +00:00
Max Lübke
15d9b06516 Update getModuleSizes function to use new API 2024-03-19 09:48:31 +00:00
Max Lübke
822e069492 Refactor pqc_to_grid function to use matrix instead of data.table 2024-03-19 09:47:40 +00:00
Max Luebke
f832a9e0db Save latest changes, boundaries still missing or incomplete 2024-03-19 08:12:20 +00:00
Max Luebke
87de816f93 Add modify_module_sizes function and modify module_sizes in InitialList class 2024-03-13 23:50:13 +01:00
Max Luebke
62ad47952b Update function name to replaceRawKeywordIDs in GridInit.cpp 2024-03-13 22:06:07 +01:00
Max Luebke
2a0dc93778 Update RAW Keywords to always refer to ID 1 2024-03-13 22:05:06 +01:00
Max Luebke
f7a4bcd0aa Update CMakeLists.txt and add new files for barite_het simulation 2024-03-13 17:11:53 +01:00
Max Luebke
ec555c879c Remove installation of poet binary 2024-03-13 16:58:37 +01:00
Max Luebke
3380eb4a8f Add iphreeqc and init_r_lib submodules, and make necessary changes to CMakeLists.txt and src/initializer.cpp 2024-03-13 16:57:44 +01:00
Max Lübke
27e99052b2 Add apps directory and initializer application 2024-03-07 13:54:53 +00:00
Max Lübke
805f674043 Merge branch 'v0.x' into 'main'
Fix path in GitLab CI

See merge request naaice/poet!25
2024-03-07 14:43:03 +01:00
Max Lübke
2612bf04ed Fix path in GitLab CI script 2024-03-07 13:41:50 +00:00
Max Lübke
ff6cf66be1 Update dependencies in ci.Dockerfile 2024-03-07 13:41:34 +00:00
Max Lübke
9d2c839a3d Merge branch 'v0.x' into 'main'
CI fixes

See merge request naaice/poet!24
2024-03-07 14:40:15 +01:00
Max Lübke
2c5c6dd166 Fix build of pages 2024-03-07 13:39:27 +00:00
Max Lübke
99a62420e7 Add Dockerfile for CI environment 2024-03-07 13:39:14 +00:00
Max Lübke
f2ae204ddc Merge branch 'v0.x' into 'main'
Preparation for EuroPar24 contribution

See merge request naaice/poet!23
2024-03-06 14:37:39 +01:00
Max Lübke
1c170d687b Merge remote-tracking branch 'origin/main' into v0.x 2024-03-06 13:36:36 +00:00
Max Lübke
7181465835 Update doxygen configuration 2024-03-06 12:25:30 +00:00
Max Lübke
a3683f0459 Add Dockerfile and devcontainer.json for VS Code development environment 2024-03-06 12:25:17 +00:00
Max Lübke
263f23534d Remove R_lib subdirectory from CMakeLists.txt 2024-03-06 12:24:07 +00:00
Max Lübke
66d64a5772 Merge branch 'include_scripts' into 'v0.x'
Add R library file as string to executable

See merge request naaice/poet!21
2024-03-06 12:34:49 +01:00
Max Lübke
ab83d8c350 Add R library file as string to executable 2024-03-06 12:34:02 +01:00
Max Lübke
f542c5638b Merge branch 'restructure' into 'v0.x'
Refactor build process

See merge request naaice/poet!20
2024-03-06 12:03:49 +01:00
Max Lübke
0fa0aa699b Remove unused CMakeLists.txt file for SurrogateModels 2024-03-06 12:03:02 +01:00
Max Lübke
ffb4fe6bc6 Add OMPI_SKIP_MPICXX to target_compile_definitions in CMakeLists.txt 2024-03-06 12:01:19 +01:00
Max Lübke
a2ebc3f1d7 Update CMakeLists.txt and include headers in test files 2024-03-06 11:59:18 +01:00
Max Lübke
7abf987123 Add Macros.hpp and RInsidePOET.hpp***
***Add RInsidePOET class and RHookFunction template***
***Update include paths in SimParams.hpp***
***Update CMakeLists.txt to link poetlib***
***Update DHT_Wrapper.hpp include path***
***Update DiffusionModule.cpp include path***
***Update poet.cpp include paths***
***Add poet.hpp.in
2024-03-06 11:59:12 +01:00
Max Lübke
fbff818e48 Remove unused 'app' directory 2024-03-06 11:32:45 +01:00
Max Lübke
62412e8d87 Refactor build process
Now include/app paths are combined in the src dir.
2024-03-06 11:26:52 +01:00
Max Lübke
e5ed6f9f87 Fix broken Dolo large benchmark
Update surfex to a larger grid size
2024-03-01 12:57:42 +01:00
Max Lübke
ee0bd9167c Update initial and boundary conditions for dolo large model 2024-02-26 12:34:42 +01:00
Max Lübke
caa54ebf5d Add .vscode to .gitignore 2024-02-22 09:47:05 +01:00
Marco De Lucia
0b14de480b fix: updated bench/barite/{barite.R, barite_interp_eval.R}, added bench/barite/barite_200.R 2024-01-12 12:50:54 +01:00
Max Lübke
433f095f58 Update file dolo_interp_long.R 2023-11-21 16:25:50 +01:00
Max Lübke
75702bb351 docs: improve main SVG 2023-11-01 11:01:38 +01:00
Max Lübke
bb12c1bb81 feat: instead of always storing exact input and output make it dependent
on the usage of interpolation
2023-11-01 10:55:46 +01:00
Max Lübke
384f5bd336 feat: make storage of input values in DHT optional 2023-09-08 22:43:52 +02:00
Max Lübke
5cd517510f fix: output DHT size in megabyte 2023-09-07 09:12:44 +02:00
Marco De Lucia
2b896fa070 docs: README for surfex bench 2023-08-26 18:39:50 +02:00
Marco De Lucia
e9ce4eeb1d fix: stdout time for diffusion step. Fixes in READMEs 2023-08-26 17:34:21 +02:00
Marco De Lucia
bd5be576a7 docs: readmes for dolo & barite 2023-08-26 17:22:22 +02:00
Marco De Lucia
f0c9c3ea71 ICs, BCs, fixes 2023-08-26 14:24:26 +02:00
Marco De Lucia
b86931bffe refs, eqs and text 2023-08-26 13:55:06 +02:00
Marco De Lucia
dbb25dc0af more eqs and text 2023-08-26 13:41:00 +02:00
Marco De Lucia
a8d23a9563 eqs and text 2023-08-26 13:10:17 +02:00
Marco De Lucia
44c6e45fef some fixes 2023-08-26 12:50:12 +02:00
Marco De Lucia
3c530a68d0 Revert "Update file README.org"
This reverts commit 79ef735be72bfa3461efbef9c1b2b80af0d177f6
2023-08-26 12:49:31 +02:00
Max Lübke
751da30399 Merge branch 'remove-prop-type' into 'main'
refactor: remove DHT prop type from input script

See merge request naaice/poet!17
2023-07-21 17:25:12 +02:00
Max Luebke
2469e0fa23 refactor: remove DHT prop type from input script 2023-07-21 17:24:25 +02:00
237 changed files with 27654 additions and 6120 deletions

108
.devcontainer/Dockerfile Normal file
View File

@ -0,0 +1,108 @@
# Dev-container build image: GCC 11 toolchain with OpenMPI, CMake, LAPACK,
# R (plus Rcpp/RInside/qs), Eigen and GDB built from source, and a zsh-based
# non-root "vscode" user environment.
FROM gcc:11.2.0 AS builder
ENV DEBIAN_FRONTEND=noninteractive
# Base build tooling from the distro.
RUN apt-get update \
&& apt-get install -y \
sudo \
git \
ninja-build \
libmpfr-dev \
python3-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
WORKDIR /tmp
ARG OPENMPI_VERSION=4.1.1
ADD https://download.open-mpi.org/release/open-mpi/v${OPENMPI_VERSION%.*}/openmpi-${OPENMPI_VERSION}.tar.gz /tmp/openmpi.tar.gz
RUN mkdir openmpi && \
tar xf openmpi.tar.gz -C openmpi --strip-components 1 && \
cd openmpi && \
./configure --prefix=/usr/local && \
make -j $(nproc) && \
make install && \
rm -rf /tmp/openmpi /tmp/openmpi.tar.gz
# ^ fixed: was "tmp/openmpi.tar.gz" (relative to WORKDIR /tmp, i.e.
#   /tmp/tmp/openmpi.tar.gz), which silently left the tarball in the layer.
ARG CMAKE_VERSION=3.30.5
ADD https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.sh /tmp/cmake.sh
RUN bash ./cmake.sh --skip-license --prefix=/usr/local \
&& rm cmake.sh
ARG LAPACK_VERSION=3.12.0
ADD https://github.com/Reference-LAPACK/lapack/archive/refs/tags/v${LAPACK_VERSION}.tar.gz /tmp/lapack.tar.gz
RUN mkdir lapack && \
tar xf lapack.tar.gz -C lapack --strip-components 1 && \
cd lapack && \
mkdir build && \
cd build && \
cmake .. -G Ninja -DBUILD_SHARED_LIBS=ON && \
ninja install && \
rm -rf /tmp/lapack /tmp/lapack.tar.gz
# ^ fixed: same relative-path cleanup bug as above.
ARG R_VERSION=4.4.2
ADD https://cran.r-project.org/src/base/R-${R_VERSION%%.*}/R-${R_VERSION}.tar.gz /tmp/R.tar.gz
RUN mkdir R && \
tar xf R.tar.gz -C R --strip-components 1 && \
cd R && \
./configure --prefix=/usr/local --enable-R-shlib --with-blas --with-lapack && \
make -j $(nproc) && \
make install && \
rm -rf /tmp/R /tmp/R.tar.gz
# ^ fixed: same relative-path cleanup bug as above.
RUN /usr/local/bin/R -q -e "install.packages(c('Rcpp', 'RInside', 'qs'), repos='https://cran.rstudio.com/')"
ARG EIGEN_VERSION=3.4.0
ADD https://gitlab.com/libeigen/eigen/-/archive/${EIGEN_VERSION}/eigen-${EIGEN_VERSION}.tar.bz2 /tmp/eigen.tar.bz2
RUN mkdir eigen && \
tar xf eigen.tar.bz2 -C eigen --strip-components 1 && \
cd eigen && \
mkdir build && \
cd build && \
cmake .. -G Ninja && \
ninja install && \
rm -rf /tmp/eigen /tmp/eigen.tar.bz2
# ^ fixed: same relative-path cleanup bug as above.
ARG GDB_VERSION=15.2
ADD https://ftp.gnu.org/gnu/gdb/gdb-${GDB_VERSION}.tar.xz /tmp/gdb.tar.xz
RUN mkdir gdb && \
tar xf gdb.tar.xz -C gdb --strip-components 1 && \
cd gdb && \
./configure --prefix=/usr/local && \
make -j $(nproc) && \
make install && \
rm -rf /tmp/gdb /tmp/gdb.tar.xz
# ^ fixed: same relative-path cleanup bug as above.
# Non-root user for VS Code with passwordless sudo.
RUN useradd -m -s /bin/bash -G sudo vscode \
&& echo "vscode ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
USER vscode
ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
RUN sudo apt-get update && \
sudo apt-get install -y zsh && \
sudo apt-get clean && \
sudo rm -rf /var/lib/apt/lists/*
# Shell niceties: oh-my-zsh (agnoster theme), syntax highlighting, fzf.
RUN sh -c "$(wget -O- https://github.com/deluan/zsh-in-docker/releases/download/v1.2.1/zsh-in-docker.sh)" -- \
-t agnoster \
-p zsh-syntax-highlighting
RUN zsh -c "git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting"
RUN zsh -c "git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf && ~/.fzf/install"
# Allow gdb to auto-load project .gdbinit files from anywhere.
RUN mkdir -p /home/vscode/.config/gdb \
&& echo "set auto-load safe-path /" > /home/vscode/.config/gdb/gdbinit
ENV CMAKE_GENERATOR=Ninja
ENV CMAKE_EXPORT_COMPILE_COMMANDS=ON
WORKDIR /home/vscode

View File

@ -0,0 +1,29 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile
{
"build": {
"dockerfile": "Dockerfile"
},
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Uncomment the next line to run commands after the container is created.
// "postCreateCommand": "cat /etc/os-release",
// Configure tool-specific properties.
"customizations": {
"vscode": {
"extensions": [
"twxs.cmake",
"llvm-vs-code-extensions.vscode-clangd"
]
}
},
// in case you want to push/pull from remote repositories using ssh
"mounts": [
"source=${localEnv:HOME}/.ssh,target=/home/vscode/.ssh,type=bind,consistency=cached",
"source=${localEnv:HOME}/.gitconfig,target=/home/vscode/.gitconfig,type=bind,consistency=cached"
]
// Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "devcontainer"
}

22
.gitignore vendored
View File

@ -141,3 +141,25 @@ vignettes/*.pdf
build/
/.cache/
.vscode
.codechecker
# Prevent upload of local installations
bin/*
share/
lib/
include/
# But keep these specific files
!bin/plot/
!bin/barite_fgcs_3.pqi
!bin/barite_fgcs_4_rt.R
!bin/barite_fgcs_4.R
!bin/barite_fgcs_4.qs2
!bin/db_barite.dat
!bin/dolo_fgcs_3.qs2
!bin/dolo_fgcs_3.R
!bin/dolo_fgcs_3.pqi
!bin/phreeqc_kin.dat
!bin/run_poet.sh
bin/dolo/

View File

@ -19,7 +19,6 @@
image: git.gfz-potsdam.de:5000/naaice/poet:ci
stages: # List of stages for jobs, and their order of execution
- build
- release
- test
@ -28,89 +27,77 @@ variables:
SOURCE_ARCHIVE_NAME: 'poet_${CI_COMMIT_TAG}_sources.tar.gz'
CHANGELOG_FILE: 'commit_changelog.md'
build-poet: # This job runs in the build stage, which runs first.
stage: build
script:
- mkdir build && cd build
- cmake -DPOET_ENABLE_TESTING=ON ..
- make -j$(nproc)
artifacts:
paths:
- build
expire_in: 1 day
test-poet:
test: # This job runs in the build stage, which runs first.
stage: test
dependencies:
- build-poet
script:
- cd build
- mkdir -p build && cd build
- cmake -DPOET_ENABLE_TESTING=ON -DPOET_PREPROCESS_BENCHS=OFF -DCMAKE_BUILD_TYPE=Release ..
- make -j$(nproc) check
archive-sources: # This job runs in the build stage, which runs first.
image: python:3
stage: release
before_script:
- pip install git-archive-all
- echo ARCHIVE_JOB_ID=${CI_JOB_ID} >> archives.env
script:
- git-archive-all ${SOURCE_ARCHIVE_NAME}
artifacts:
paths:
- ${SOURCE_ARCHIVE_NAME}
expire_in: never
reports:
dotenv: archives.env
rules:
- if: $CI_COMMIT_TAG
release-description:
image: golang:bullseye
stage: release
rules:
- if: $CI_COMMIT_TAG
before_script:
- go install github.com/git-chglog/git-chglog/cmd/git-chglog@v0.15.2
script:
- git-chglog -o ${CHANGELOG_FILE} ${CI_COMMIT_TAG}
artifacts:
paths:
- ${CHANGELOG_FILE}
release-create:
stage: release
image: registry.gitlab.com/gitlab-org/release-cli:latest
rules:
- if: $CI_COMMIT_TAG
script:
- echo "Running release job"
needs:
- job: archive-sources
artifacts: true
- job: release-description
artifacts: true
release:
tag_name: $CI_COMMIT_TAG
name: 'POET $CI_COMMIT_TAG'
description: ${CHANGELOG_FILE}
assets:
links:
- name: '${SOURCE_ARCHIVE_NAME}'
url: 'https://git.gfz-potsdam.de/naaice/poet/-/jobs/${ARCHIVE_JOB_ID}/artifacts/file/${SOURCE_ARCHIVE_NAME}'
pages:
stage: release
before_script:
- apt-get update && apt-get install -y doxygen graphviz
- mkdir {build,public}
- mkdir {build_pages,public}
script:
- pushd build
- pushd build_pages
- cmake .. && make doxygen
- popd && mv build/docs/html/* public/
- popd && mv build_pages/docs/html/* public/
artifacts:
paths:
- public
rules:
- if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
- if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH || $CI_COMMIT_TAG
#archive-sources: # This job runs in the build stage, which runs first.
# image: python:3
# stage: release
#
# before_script:
# - pip install git-archive-all
# - echo ARCHIVE_JOB_ID=${CI_JOB_ID} >> archives.env
# script:
# - git-archive-all ${SOURCE_ARCHIVE_NAME}
# artifacts:
# paths:
# - ${SOURCE_ARCHIVE_NAME}
# expire_in: never
# reports:
# dotenv: archives.env
# rules:
# - if: $CI_COMMIT_TAG
#release-description:
# image: golang:bullseye
# stage: release
# rules:
# - if: $CI_COMMIT_TAG
# before_script:
# - go install github.com/git-chglog/git-chglog/cmd/git-chglog@v0.15.2
# script:
# - git-chglog -o ${CHANGELOG_FILE} ${CI_COMMIT_TAG}
# artifacts:
# paths:
# - ${CHANGELOG_FILE}
#
#
#release-create:
# stage: release
# image: registry.gitlab.com/gitlab-org/release-cli:latest
# rules:
# - if: $CI_COMMIT_TAG
# script:
# - echo "Running release job"
# needs:
# - job: archive-sources
# artifacts: true
# - job: release-description
# artifacts: true
# release:
# tag_name: $CI_COMMIT_TAG
# name: 'POET $CI_COMMIT_TAG'
# description: ${CHANGELOG_FILE}
# assets:
# links:
# - name: '${SOURCE_ARCHIVE_NAME}'
# url: 'https://git.gfz-potsdam.de/naaice/poet/-/jobs/${ARCHIVE_JOB_ID}/artifacts/file/${SOURCE_ARCHIVE_NAME}'

9
.gitmodules vendored
View File

@ -2,9 +2,6 @@
path = ext/tug
url = ../tug.git
[submodule "ext/phreeqcrm"]
path = ext/phreeqcrm
url = ../phreeqcrm-gfz.git
[submodule "ext/doctest"]
path = ext/doctest
url = https://github.com/doctest/doctest.git
[submodule "ext/iphreeqc"]
path = ext/iphreeqc
url = ../iphreeqc.git

View File

@ -1,30 +1,27 @@
# prepare R environment (Rcpp + RInside)
find_program(R_EXE "R")
find_program(R_EXE "R"
REQUIRED
)
# search for R executable, R header file and library path
if(R_EXE)
execute_process(
COMMAND ${R_EXE} RHOME
OUTPUT_VARIABLE R_ROOT_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(
COMMAND ${R_EXE} RHOME
OUTPUT_VARIABLE R_ROOT_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
find_path(
R_INCLUDE_DIR R.h
HINTS ${R_ROOT_DIR}
PATHS /usr/include /usr/local/include /usr/share
PATH_SUFFIXES include/R R/include
)
find_path(
R_INCLUDE_DIR R.h
HINTS /usr/include /usr/local/include /usr/share ${R_ROOT_DIR}/include
PATH_SUFFIXES R/include R
REQUIRED
)
find_library(
R_LIBRARY libR.so
HINTS ${R_ROOT_DIR}/lib
)
else()
message(FATAL_ERROR "No R runtime found!")
endif()
mark_as_advanced(R_INCLUDE_DIR R_LIBRARY R_EXE)
find_library(
R_LIBRARY libR.so
HINTS ${R_ROOT_DIR}/lib
REQUIRED
)
set(R_LIBRARIES ${R_LIBRARY})
set(R_INCLUDE_DIRS ${R_INCLUDE_DIR})
@ -45,8 +42,6 @@ find_path(R_Rcpp_INCLUDE_DIR Rcpp.h
HINTS ${RCPP_PATH}
PATH_SUFFIXES include)
mark_as_advanced(R_Rcpp_INCLUDE_DIR)
list(APPEND R_INCLUDE_DIRS ${R_Rcpp_INCLUDE_DIR})
# find RInside libraries and include path
@ -72,16 +67,11 @@ find_path(R_RInside_INCLUDE_DIR RInside.h
list(APPEND R_LIBRARIES ${R_RInside_LIBRARY})
list(APPEND R_INCLUDE_DIRS ${R_RInside_INCLUDE_DIR})
mark_as_advanced(R_RInside_LIBRARY R_RInside_INCLUDE_DIR)
# putting all together into interface library
add_library(RRuntime INTERFACE IMPORTED)
set_target_properties(
RRuntime PROPERTIES
INTERFACE_LINK_LIBRARIES "${R_LIBRARIES}"
INTERFACE_INCLUDE_DIRECTORIES "${R_INCLUDE_DIRS}"
)
target_link_libraries(RRuntime INTERFACE ${R_LIBRARIES})
target_include_directories(RRuntime INTERFACE ${R_INCLUDE_DIRS})
unset(R_LIBRARIES)
unset(R_INCLUDE_DIRS)

View File

@ -13,7 +13,7 @@ macro(get_POET_version)
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE POET_GIT_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT POET_GIT_BRANCH STREQUAL "master")
if(NOT POET_GIT_BRANCH STREQUAL "main")
set(POET_VERSION "${POET_GIT_BRANCH}/${POET_GIT_VERSION}")
else()
set(POET_VERSION "${POET_GIT_VERSION}")
@ -21,7 +21,7 @@ macro(get_POET_version)
elseif(EXISTS ${PROJECT_SOURCE_DIR}/.svn)
file(STRINGS .gitversion POET_VERSION)
else()
set(POET_VERSION "0.1")
set(POET_VERSION "not_versioned")
endif()
message(STATUS "Configuring POET version ${POET_VERSION}")

View File

@ -1,12 +1,11 @@
# Version 3.9+ offers new MPI package variables
cmake_minimum_required(VERSION 3.9)
cmake_minimum_required(VERSION 3.14)
project(POET
LANGUAGES CXX C
DESCRIPTION "A coupled reactive transport simulator")
# specify the C++ standard
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED True)
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
@ -16,27 +15,27 @@ list(APPEND CMAKE_MODULE_PATH "${POET_SOURCE_DIR}/CMake")
get_poet_version()
# set(GCC_CXX_FLAGS "-D STRICT_R_HEADERS") add_definitions(${GCC_CXX_FLAGS})
find_package(MPI REQUIRED)
find_package(RRuntime REQUIRED)
add_subdirectory(src)
add_subdirectory(R_lib)
add_subdirectory(app)
add_subdirectory(bench)
option(POET_PREPROCESS_BENCHS "Preprocess benchmarks" ON)
if (POET_PREPROCESS_BENCHS)
add_subdirectory(bench)
endif()
# as tug will also pull in doctest as a dependency
set(TUG_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
add_subdirectory(ext/tug EXCLUDE_FROM_ALL)
add_subdirectory(ext/phreeqcrm EXCLUDE_FROM_ALL)
add_subdirectory(ext/iphreeqc EXCLUDE_FROM_ALL)
option(POET_ENABLE_TESTING "Build test suite for POET" OFF)
if (POET_ENABLE_TESTING)
add_subdirectory(ext/doctest EXCLUDE_FROM_ALL)
add_subdirectory(test)
endif()

62
LICENSE
View File

@ -1,8 +1,8 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
<https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@ -278,3 +278,61 @@ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Moe Ghoul>, 1 April 1989
Moe Ghoul, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

329
README.md
View File

@ -1,77 +1,103 @@
<!--
Time-stamp: "Last modified 2023-08-02 13:55:11 mluebke"
-->
# POET
[POET](https://doi.org/10.5281/zenodo.4757913) is a coupled reactive transport
simulator implementing a parallel architecture and a fast, original MPI-based
Distributed Hash Table.
[POET](https://doi.org/10.5281/zenodo.4757913) is a coupled reactive
transport simulator implementing a parallel architecture and a fast,
original MPI-based Distributed Hash Table.
![POET's Coupling Scheme](./docs/20230720_Scheme_POET_en.svg)
![POET's Coupling Scheme](./docs/POET_scheme.svg)
## Parsed code documentation
A parsed version of POET's documentiation can be found at [Gitlab
pages](https://naaice.git-pages.gfz-potsdam.de/poet).
A parsed version of POET's documentation can be found at [Gitlab
pages](https://naaice.git-pages.gfz.de/poet).
## External Libraries
The following external header library is shipped with POET:
The following external libraries are shipped with POET:
- **argh** - https://github.com/adishavit/argh (BSD license)
- **PhreeqcRM** with patches from GFZ -
https://www.usgs.gov/software/phreeqc-version-3 -
https://git.gfz-potsdam.de/mluebke/phreeqcrm-gfz
- **tug** - https://git.gfz-potsdam.de/sec34/tug
- **CLI11** - <https://github.com/CLIUtils/CLI11>
- **IPhreeqc** with patches from GFZ/UP -
<https://github.com/usgs-coupled/iphreeqc> -
<https://git.gfz.de/naaice/iphreeqc>
- **tug** - <https://git.gfz.de/naaice/tug>
## Installation
### Requirements
To compile POET you need several software to be installed:
To compile POET you need following software to be installed:
- C/C++ compiler (tested with GCC)
- MPI-Implementation (tested with OpenMPI and MVAPICH)
- R language and environment
- CMake 3.9+
- *optional*: `doxygen` with `dot` bindings for documentiation
- Eigen3 3.4+ (required by `tug`)
- *optional*: `doxygen` with `dot` bindings for documentation
- R language and environment including headers or `-dev` packages
(distro dependent)
The following R libraries must then be installed, which will get the
needed dependencies automatically:
The following R packages (and their dependencies) must also be
installed:
- [Rcpp](https://cran.r-project.org/web/packages/Rcpp/index.html)
- [RInside](https://cran.r-project.org/web/packages/RInside/index.html)
- [qs](https://cran.r-project.org/web/packages/qs/index.html)
- [qs2](https://cran.r-project.org/web/packages/qs2/index.html)
This can be simply achieved by issuing the following commands:
```sh
# start R environment
$ R
# install R dependencies (case sensitive!)
> install.packages(c("Rcpp", "RInside","qs","qs2"))
> q(save="no")
```
### Clone the repository
POET can be anonymously cloned from this repo over https. Make sure to
also download the submodules:
```sh
git clone --recurse-submodules https://git.gfz.de/naaice/poet.git
```
The `--recurse-submodules` option is a shorthand for:
```sh
cd poet
git submodule init && git submodule update
```
### Compiling source code
The generation of makefiles is done with CMake. You should be able to generate
Makefiles by running:
POET is built with CMake. You can generate Makefiles by running the
usual:
```sh
mkdir build && cd build
cmake ..
cmake -DCMAKE_BUILD_TYPE=Release ..
```
This will create the directory `build` and processes the CMake files
and generate Makefiles from it. You're now able to run `make` to start
build process.
If everything went well you'll find the executable at
`build/app/poet`, but it is recommended to install the POET project
If everything went well you'll find the executables at
`build/src/poet`, but it is recommended to install the POET project
structure to a desired `CMAKE_INSTALL_PREFIX` with `make install`.
During the generation of Makefiles, various options can be specified
via `cmake -D <option>=<value> [...]`. Currently, there are the
following available options:
- **POET_DHT_Debug**=_boolean_ - toggles the output of detailed statistics about
DHT usage. Defaults to _OFF_.
- **POET_ENABLE_TESTING**=_boolean_ - enables small set of unit tests (more to
come). Defaults to _OFF_.
- **POET_PHT_ADDITIONAL_INFO**=_boolean_ - enabling the count of accesses to one
PHT bucket. Use with caution, as things will get slowed down significantly.
Defaults to _OFF_.
- **POET_DHT_Debug**=_boolean_ - toggles the output of detailed
statistics about DHT usage. Defaults to _OFF_.
- **POET_ENABLE_TESTING**=_boolean_ - enables small set of unit tests
(more to come). Defaults to _OFF_.
- **POET_PHT_ADDITIONAL_INFO**=_boolean_ - enabling the count of
accesses to one PHT bucket. Use with caution, as things will get
slowed down significantly. Defaults to _OFF_.
- **POET_PREPROCESS_BENCHS**=*boolean* - enables the preprocessing of
predefined models/benchmarks. Defaults to *ON*.
### Example: Build from scratch
@ -84,7 +110,7 @@ follows:
$ R
# install R dependencies
> install.packages(c("Rcpp", "RInside"))
> install.packages(c("Rcpp", "RInside","qs","qs2"))
> q(save="no")
# cd into POET project root
@ -92,7 +118,7 @@ $ cd <POET_dir>
# Build process
$ mkdir build && cd build
$ cmake -DCMAKE_INSTALL_PREFIX=/home/<user>/poet ..
$ cmake -DCMAKE_INSTALL_PREFIX=/home/<user>/poet -DCMAKE_BUILD_TYPE=Release ..
$ make -j<max_numprocs>
$ make install
```
@ -107,66 +133,64 @@ The correspondending directory tree would look like this:
```sh
poet
├── bin
│ └── poet
├── R_lib
│ └── kin_r_library.R
│   ├── poet
│   └── poet_init
└── share
└── poet
└── bench
├── barite
│ ├── barite_interp_eval.R
│ ├── barite.pqi
│ ├── barite.R
│ └── db_barite.dat
├── dolo
│ ├── dolo_diffu_inner_large.R
│ ├── dolo_diffu_inner.R
│ ├── dolo_inner.pqi
│ ├── dolo_interp_long.R
│ └── phreeqc_kin.dat
└── surfex
├── ExBase.pqi
├── ex.R
├── SMILE_2021_11_01_TH.dat
├── SurfExBase.pqi
└── surfex.R
├── barite
│   ├── barite_200.qs2
│   ├── barite_200_rt.R
│   ├── barite_het.qs2
│   └── barite_het_rt.R
├── dolo
│   ├── dolo_inner_large.qs2
│   ├── dolo_inner_large_rt.R
│   ├── dolo_interp.qs2
│   └── dolo_interp_rt.R
└── surfex
├── PoetEGU_surfex_500.qs2
└── PoetEGU_surfex_500_rt.R
```
The R libraries will be loaded at runtime and the paths are hardcoded
absolute paths inside `poet.cpp`. So, if you consider to move
`bin/poet` either change paths of the R source files and recompile
POET or also move `R_lib/*` relative to the binary.
With the installation of POET, two executables are provided:
- `poet` - the main executable to run simulations
- `poet_init` - a preprocessor to generate input files for POET from
R scripts
The benchmarks consist of input scripts, which are provided as .R files.
Additionally, Phreeqc scripts and their corresponding databases are required,
stored as .pqi and .dat files, respectively.
Preprocessed benchmarks can be found in the `share/poet` directory
with an according *runtime* setup. More on those files and how to
create them later.
## Running
Run POET by `mpirun ./poet <OPTIONS> <SIMFILE> <OUTPUT_DIRECTORY>`
where:
Run POET by `mpirun ./poet [OPTIONS] <RUNFILE> <SIMFILE>
<OUTPUT_DIRECTORY>` where:
- **OPTIONS** - runtime parameters (explained below)
- **SIMFILE** - simulation described as R script (e.g.
`<POET_INSTALL_DIR>/share/poet/bench/dolo/dolo_interp_long.R`)
- **OUTPUT_DIRECTORY** - path, where all output of POET should be stored
- **OPTIONS** - POET options (explained below)
- **RUNFILE** - Runtime parameters described as R script
- **SIMFILE** - Simulation input prepared by `poet_init`
- **OUTPUT_DIRECTORY** - path, where all output of POET should be
stored
### Runtime options
### POET command line arguments
The following parameters can be set:
| Option | Value | Description |
|-----------------------------|--------------|--------------------------------------------------------------------------------------------------------------------------|
| **--work-package-size=** | _1..n_ | size of work packages (defaults to _5_) |
| **--ignore-result** | | disables store of simulation resuls |
| **--dht** | | enabling DHT usage (defaults to _OFF_) |
| **--dht-strategy=** | _0-1_ | change DHT strategy. **NOT IMPLEMENTED YET** (Defaults to _0_) |
| **--dht-size=** | _1-n_ | size of DHT per process involved in megabyte (defaults to _1000 MByte_) |
| **--dht-snaps=** | _0-2_ | disable or enable storage of DHT snapshots |
| **--dht-file=** | `<SNAPSHOT>` | initializes DHT with the given snapshot file |
| **--interp-size** | _1-n_ | size of PHT (interpolation) per process in megabyte |
| **--interp-bucket-entries** | _1-n_ | number of entries to store at maximum in one PHT bucket |
| **--interp-min** | _1-n_ | number of entries in PHT bucket needed to start interpolation |
| Option | Value | Description |
|-----------------------------|--------------|----------------------------------------------------------------------------------|
| **--work-package-size=** | _1..n_ | size of work packages (defaults to _5_) |
| **-P, --progress** | | show progress bar |
| **--ai-surrogate** | | activates the AI surrogate chemistry model (defaults to _OFF_) |
| **--dht** | | enabling DHT usage (defaults to _OFF_) |
| **--qs** | | store results using qs::qsave() (.qs extension) instead of default qs2 (.qs2) |
| **--rds** | | store results using saveRDS() (.rds extension) instead of default qs2 (.qs2) |
| **--dht-strategy=** | _0-1_ | change DHT strategy. **NOT IMPLEMENTED YET** (Defaults to _0_) |
| **--dht-size=** | _1-n_ | size of DHT per process involved in megabyte (defaults to _1000 MByte_) |
| **--dht-snaps=** | _0-2_ | disable or enable storage of DHT snapshots |
| **--dht-file=** | `<SNAPSHOT>` | initializes DHT with the given snapshot file |
| **--interp-size** | _1-n_ | size of PHT (interpolation) per process in megabyte |
| **--interp-bucket-entries** | _1-n_ | number of entries to store at maximum in one PHT bucket |
| **--interp-min** | _1-n_ | number of entries in PHT bucket needed to start interpolation |
#### Additions to `dht-snaps`
@ -181,28 +205,106 @@ Following values can be set:
### Example: Running from scratch
We will continue the above example and start a simulation with
`dolo_diffu_inner.R`. As transport a simple fixed-coefficient diffusion is used.
It's a 2D, 100x100 grid, simulating 10 time steps. To start the simulation with
4 processes `cd` into your previously installed POET-dir
`<POET_INSTALL_DIR>/bin` and run:
*barite_het*, which simulation files can be found in
`<INSTALL_DIR>/share/poet/barite/barite_het*`. As transport a
heterogeneous diffusion is used. It's a small 2D grid, 2x5 grid,
simulating 50 time steps with a time step size of 100 seconds. To
start the simulation with 4 processes `cd` into your previously
installed POET-dir `<POET_INSTALL_DIR>/bin` and run:
```sh
mpirun -n 4 ./poet ../share/poet/bench/dolo/dolo_diffu_inner.R/ output
cp ../share/poet/barite/barite_het* .
mpirun -n 4 ./poet barite_het_rt.R barite_het.qs2 output
```
After a finished simulation all data generated by POET will be found
in the directory `output`.
You might want to use the DHT to cache previously simulated data and reuse them
in further time-steps. Just append `--dht` to the options of POET to activate
the usage of the DHT. Also, after each iteration a DHT snapshot shall be
produced. This is done by appending the `--dht-snaps=<value>` option. The
resulting call would look like this:
You might want to use the DHT to cache previously simulated data and
reuse them in further time-steps. Just append `--dht` to the options
of POET to activate the usage of the DHT. Also, after each iteration a
DHT snapshot shall be produced. This is done by appending the
`--dht-snaps=<value>` option. The resulting call would look like this:
```sh
mpirun -n 4 ./poet --dht --dht-snaps=2 ../share/poet/bench/dolo/dolo_diffu_inner.R/ output
mpirun -n 4 ./poet --dht --dht-snaps=2 barite_het_rt.R barite_het.qs2 output
```
### Example: Preparing Environment and Running with AI surrogate
To run the AI surrogate, you need to install the R package `keras3`. The
compilation process of POET remains the same as shown above.
In the following code block, the installation process on the Turing Cluster is
shown. `miniconda` is used to create a virtual environment to install
tensorflow/keras. Please adapt the installation process to your needs.
<!-- Start an R interactive session and install the required packages: -->
```sh
# First, install the required R packages
R -e "install.packages('keras3', repos='https://cloud.r-project.org/')"
# manually create a virtual environment to install keras/python using conda,
# as this is somehow broken on the Turing Cluster when using the `keras::install_keras()` function
cd poet
# create a virtual environment in the .ai directory with python 3.11
conda create -p ./.ai python=3.11
conda activate ./.ai
# install tensorflow and keras
pip install keras tensorflow[and-cuda]
# add conda's python path to the R environment
# make sure to have the conda environment activated
echo -e "RETICULATE_PYTHON=$(which python)\n" >> ~/.Renviron
```
After setup the R environment, recompile POET and you're ready to run the AI
surrogate.
```sh
cd <installation_dir>/bin
# copy the benchmark files to the installation directory
cp <project_root_dir>/bench/barite/{barite_50ai*,db_barite.dat,barite.pqi} .
# preprocess the benchmark
./poet_init barite_50ai.R
# run POET with AI surrogate and GPU utilization
srun --gres=gpu -N 1 -n 12 ./poet --ai-surrogate barite_50ai_rt.R barite_50ai.qs2 output
```
Keep in mind that the AI surrogate is currently not stable or might also not
produce any valid predictions.
## Defining a model
In order to provide a model to POET, you need to setup a R script
which can then be used by `poet_init` to generate the simulation
input. Which parameters are required can be found in the
[Wiki](https://git.gfz.de/naaice/poet/-/wikis/Initialization).
We try to keep the document up-to-date. However, if you encounter
missing information or need help, please get in touch with us via the
issue tracker or E-Mail.
`poet_init` can be used as follows:
```sh
./poet_init [-o, --output output_file] [-s, --setwd] <script.R>
```
where:
- **output** - name of the output file (defaults to the input file
name with the extension `.qs2`)
- **setwd** - set the working directory to the directory of the input
file (e.g. to allow relative paths in the input script). However,
the output file will be stored in the directory from which
`poet_init` was called.
## About the usage of MPI_Wtime()
Implemented time measurement functions uses `MPI_Wtime()`. Some
@ -211,3 +313,44 @@ important information from the OpenMPI Man Page:
For example, on platforms that support it, the clock_gettime()
function will be used to obtain a monotonic clock value with whatever
precision is supported on that platform (e.g., nanoseconds).
## Additional functions for the AI surrogate
The AI surrogate can be activated for any benchmark and is by default
initiated as a sequential keras model with three hidden layer of depth
48, 96, 24 with relu activation and adam optimizer. All functions in
`ai_surrogate_model.R` can be overridden by adding custom definitions
via an R file in the input script. This is done by adding the path to
this file in the input script. Simply add the path as an element
called `ai_surrogate_input_script` to the `chemistry_setup` list.
Please use the global variable `ai_surrogate_base_path` as a base path
when relative filepaths are used in custom functions.
**There is currently no default implementation to determine the
validity of predicted values.** This means, that every input script
must include an R source file with a custom function
`validate_predictions(predictors, prediction)`. Examples for custom
functions can be found for the barite_200 benchmark.
The functions can be defined as follows:
`validate_predictions(predictors, prediction)`: Returns a boolean
index vector that signals for each row in the predictions if the
values are considered valid. Can eg. be implemented as a mass balance
threshold between the predictors and the prediction.
`initiate_model()`: Returns a keras model. Can be used to load
pretrained models.
`preprocess(df, backtransform = FALSE, outputs = FALSE)`: Returns the
scaled/transformed/backtransformed dataframe. The `backtransform` flag
signals if the current processing step is applied to data that's
assumed to be scaled and expects backtransformed values. The `outputs`
flag signals if the current processing step is applied to the output
or target of the model. This can be used to e.g. skip these processing
steps and only scale the model input.
`training_step (model, predictor, target, validity)`: Trains the model
after each iteration. `validity` is the bool index vector given by
`validate_predictions` and can eg. be used to only train on values
that have not been valid predictions.

View File

@ -1 +0,0 @@
install(FILES kin_r_library.R DESTINATION R_lib)

View File

@ -0,0 +1,75 @@
## This file contains default function implementations for the ai surrogate.
## To load pretrained models, use pre-/postprocessing or change hyperparameters
## it is recommended to override these functions with custom implementations via
## the input script. The path to the R-file containing the functions must be set
## in the variable "ai_surrogate_input_script". See the barite_200.R file as an
## example and the general README for more information.
require(keras3)
require(tensorflow)
## Build the default surrogate network: a dense feed-forward keras model
## with three hidden layers (48, 96, 24 units, relu activation), compiled
## with mean-squared-error loss and the adam optimizer. Input and output
## width both equal the number of tracked species.
## NOTE(review): relies on the global `ai_surrogate_species` being defined
## by the caller before this function runs — confirm in the POET init path.
## Override via `ai_surrogate_input_script` to load a pretrained model.
initiate_model <- function() {
  hidden_layers <- c(48, 96, 24)
  activation <- "relu"
  loss <- "mean_squared_error"
  input_length <- length(ai_surrogate_species)
  output_length <- length(ai_surrogate_species)
  ## Creates a new sequential model from scratch
  model <- keras_model_sequential()
  ## Input layer defined by input data shape
  model %>% layer_dense(units = input_length,
                        activation = activation,
                        input_shape = input_length,
                        dtype = "float32")
  ## Hidden layers as configured above
  for (layer_size in hidden_layers) {
    model %>% layer_dense(units = layer_size,
                          activation = activation,
                          dtype = "float32")
  }
  ## Output data defined by output data shape
  model %>% layer_dense(units = output_length,
                        activation = activation,
                        dtype = "float32")
  model %>% compile(loss = loss,
                    optimizer = "adam")
  return(model)
}
## Log whether TensorFlow reports a configured GPU.
## NOTE(review): uses `msgm` (POET's logging helper, defined elsewhere) and
## `tf_gpu_configured()` from the tensorflow R package.
gpu_info <- function() {
  msgm(tf_gpu_configured())
}
## Run the surrogate model on the predictor rows and return the result as a
## data.frame whose column names mirror those of the predictors.
prediction_step <- function(model, predictors) {
  input_matrix <- as.matrix(predictors)
  raw_prediction <- predict(model, input_matrix)
  colnames(raw_prediction) <- colnames(predictors)
  as.data.frame(raw_prediction)
}
## Default input preprocessing: identity (no scaling/transformation).
## Override in the input script; `backtransform` marks the inverse direction
## and `outputs` marks processing applied to model targets/outputs.
preprocess <- function(df, backtransform = FALSE, outputs = FALSE) {
  df
}
## Default output postprocessing: identity (no back-transformation).
## Override in the input script when `preprocess` applies scaling.
postprocess <- function(df, backtransform = TRUE, outputs = TRUE) {
  df
}
## Merge predictions into the field: rows flagged valid (validity == 1) are
## replaced by the corresponding prediction rows; all other rows are kept.
set_valid_predictions <- function(temp_field, prediction, validity) {
  valid_rows <- validity == 1
  temp_field[valid_rows, ] <- prediction[valid_rows, ]
  temp_field
}
## Fit the surrogate on this iteration's data, then checkpoint the model to
## the output directory (global `out_dir`). The default implementation
## trains on all rows; `validity` is available for overrides that want to
## train only on rows that were not valid predictions.
training_step <- function(model, predictor, target, validity) {
  msgm("Training:")
  features <- as.matrix(predictor)
  ## Align target columns to the predictor column order before fitting.
  labels <- as.matrix(target[colnames(features)])
  model %>% fit(features, labels)
  model %>% save_model_tf(paste0(out_dir, "/current_model.keras"))
}

115
R_lib/init_r_lib.R Normal file
View File

@ -0,0 +1,115 @@
### Copyright (C) 2018-2024 Marco De Lucia, Max Luebke (GFZ Potsdam, University of Potsdam)
###
### POET is free software; you can redistribute it and/or modify it under the
### terms of the GNU General Public License as published by the Free Software
### Foundation; either version 2 of the License, or (at your option) any later
### version.
###
### POET is distributed in the hope that it will be useful, but WITHOUT ANY
### WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
### A PARTICULAR PURPOSE. See the GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License along with
### this program; if not, write to the Free Software Foundation, Inc., 51
### Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##' @param pqc_mat matrix, containing IDs and PHREEQC outputs
##' @param grid matrix, zonation referring to pqc_mat$ID
##' @return a data.frame
# pqc_to_grid <- function(pqc_mat, grid) {
# # Convert the input DataFrame to a matrix
# pqc_mat <- as.matrix(pqc_mat)
# # Flatten the matrix into a vector
# id_vector <- as.integer(t(grid))
# # Find the matching rows in the matrix
# row_indices <- match(id_vector, pqc_mat[, "ID"])
# # Extract the matching rows from pqc_mat to size of grid matrix
# result_mat <- pqc_mat[row_indices, ]
# # Convert the result matrix to a data frame
# res_df <- as.data.frame(result_mat)
# # Remove all columns which only contain NaN
# res_df <- res_df[, colSums(is.na(res_df)) != nrow(res_df)]
# # Remove row names
# rownames(res_df) <- NULL
# return(res_df)
# }
##' @param pqc_mat matrix, containing IDs and PHREEQC outputs
##' @param grid matrix, zonation referring to pqc_mat$ID
##' @return a data.frame
##' Map PHREEQC output rows onto the simulation grid via zone IDs.
##' @param pqc_mat matrix-like object containing an "ID" column and PHREEQC outputs
##' @param grid matrix, zonation whose entries refer to pqc_mat[, "ID"]
##' @return a data.frame with one row per grid cell (row-major order) and a
##'   leading 0-based cell_ID column
pqc_to_grid <- function(pqc_mat, grid) {
## work on a plain matrix regardless of the input container
mat <- as.matrix(pqc_mat)
## flatten the grid row-major into a vector of zone IDs
zone_ids <- as.integer(t(grid))
## pick, for every cell, the PHREEQC row whose ID matches the cell's zone
picked <- mat[match(zone_ids, mat[, "ID"]), ]
out <- as.data.frame(picked)
## prepend a 0-based cell identifier (row-major cell numbering)
out <- cbind(cell_ID = seq(0, nrow(out) - 1), out)
## NOTE: removal of all-NaN columns is intentionally disabled
rownames(out) <- NULL
return(out)
}
##' @param pqc_mat matrix,
##' @param transport_spec column name of species in pqc_mat
##' @param id
##' @title
##' @return
##' Look up the boundary value of one transported species for a given zone ID.
##' @param pqc_mat matrix-like object with an "ID" column and species columns
##' @param transport_spec column name of the species in pqc_mat
##' @param id zone ID to resolve
##' @return the stored value, or 0 when the value is NaN or the ID is absent
resolve_pqc_bound <- function(pqc_mat, transport_spec, id) {
df <- as.data.frame(pqc_mat, check.names = FALSE)
value <- df[df$ID == id, transport_spec]
## Guard against an unknown ID: the selection is then empty and the original
## `if (is.nan(value))` errored on a zero-length condition. Treat a missing
## entry like NaN and fall back to 0.
if (length(value) == 0 || is.nan(value)) {
value <- 0
}
return(value)
}
##' @title
##' @param init_grid
##' @param new_names
##' @return
##' Extend an initial grid so it contains all required transport species.
##' Required columns missing from init_grid are added and filled with 0; any
##' extra columns of init_grid are appended after the required ones.
##' @param init_grid data.frame holding at least an ID column
##' @param new_names character vector of required transport species columns
##' @return data.frame with columns ID, new_names, then the remaining
##'   original columns of init_grid
add_missing_transport_species <- function(init_grid, new_names) {
# add 'ID' to new_names front, as it is not a transport species but required
new_names <- c("ID", new_names)
sol_length <- length(new_names)
new_grid <- data.frame(matrix(0, nrow = nrow(init_grid), ncol = sol_length))
names(new_grid) <- new_names
matching_cols <- intersect(names(init_grid), new_names)
# Copy matching columns from init_grid to new_grid
new_grid[, matching_cols] <- init_grid[, matching_cols]
# Append remaining columns; drop = FALSE keeps a data.frame (and thus the
# real column name) even when exactly one column is left over — without it
# a single leftover column was renamed to "append_df" by cbind()
append_df <- init_grid[, !(names(init_grid) %in% new_names), drop = FALSE]
new_grid <- cbind(new_grid, append_df)
return(new_grid)
}

View File

@ -1,5 +1,3 @@
## Time-stamp: "Last modified 2023-08-15 11:58:23 delucia"
### Copyright (C) 2018-2023 Marco De Lucia, Max Luebke (GFZ Potsdam)
###
### POET is free software; you can redistribute it and/or modify it under the
@ -15,279 +13,199 @@
### this program; if not, write to the Free Software Foundation, Inc., 51
### Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## Simple function to check file extension. It is needed to check if
## the GridFile is SUM (MUFITS format) or rds/RData
## Return the (alphanumeric) file extension of a path, or "" when there is
## none. Needed to decide whether a GridFile is SUM (MUFITS) or rds/RData.
FileExt <- function(x) {
dot_pos <- regexpr("\\.([[:alnum:]]+)$", x)
ifelse(dot_pos > -1L, substring(x, dot_pos + 1L), "")
}
master_init <- function(setup) {
msgm("Process with rank 0 reading GRID properties")
master_init <- function(setup, out_dir, init_field) {
## Setup the directory where we will store the results
verb <- FALSE
if (local_rank == 0) {
verb <- TRUE ## verbosity loading MUFITS results
if (!dir.exists(fileout)) {
dir.create(fileout)
msgm("created directory ", fileout)
} else {
msgm("dir ", fileout, " already exists, I will overwrite!")
}
if (!exists("store_result")) {
msgm("store_result doesn't exist!")
} else {
msgm("store_result is ", store_result)
}
if (!dir.exists(out_dir)) {
dir.create(out_dir)
msgm("created directory ", out_dir)
} else {
msgm("dir ", out_dir, " already exists, I will overwrite!")
}
if (is.null(setup$store_result)) {
msgm("store_result doesn't exist!")
} else {
msgm("store_result is ", setup$store_result)
}
setup$iter <- 1
setup$maxiter <- setup$iterations
setup$timesteps <- setup$timesteps
setup$maxiter <- length(setup$timesteps)
setup$iterations <- setup$maxiter
setup$simulation_time <- 0
dgts <- as.integer(ceiling(log10(setup$maxiter)))
## string format to use in sprintf
fmt <- paste0("%0", dgts, "d")
if (is.null(setup[["store_result"]])) {
setup$store_result <- TRUE
}
if (setup$store_result) {
init_field_out <- paste0(out_dir, "/iter_", sprintf(fmt = fmt, 0), ".", setup$out_ext)
init_field <- data.frame(init_field, check.names = FALSE)
SaveRObj(x = init_field, path = init_field_out)
msgm("Stored initial field in ", init_field_out)
if (is.null(setup[["out_save"]])) {
setup$out_save <- seq(1, setup$iterations)
}
}
setup$out_dir <- out_dir
return(setup)
}
## This function, called only by master, stores on disk the last
## calculated time step if store_result is TRUE and increments the
## iteration counter
master_iteration_end <- function(setup) {
master_iteration_end <- function(setup, state_T, state_C) {
iter <- setup$iter
# print(iter)
## max digits for iterations
dgts <- as.integer(ceiling(log10(setup$iterations + 1)))
## string format to use in sprintf
dgts <- as.integer(ceiling(log10(setup$maxiter + 1)))
## string format to use in sprintf
fmt <- paste0("%0", dgts, "d")
## Write on disk state_T and state_C after every iteration
## comprised in setup$out_save
if (setup$store_result) {
if (iter %in% setup$out_save) {
nameout <- paste0(fileout, "/iter_", sprintf(fmt=fmt, iter), ".rds")
info <- list(
tr_req_dt = as.integer(setup$req_dt)
## tr_allow_dt = setup$allowed_dt,
## tr_inniter = as.integer(setup$inniter)
nameout <- paste0(setup$out_dir, "/iter_", sprintf(fmt = fmt, iter), ".", setup$out_ext)
state_T <- data.frame(state_T, check.names = FALSE)
state_C <- data.frame(state_C, check.names = FALSE)
ai_surrogate_info <- list(
prediction_time = if (exists("ai_prediction_time")) as.integer(ai_prediction_time) else NULL,
training_time = if (exists("ai_training_time")) as.integer(ai_training_time) else NULL,
valid_predictions = if (exists("validity_vector")) validity_vector else NULL
)
saveRDS(list(
T = setup$state_T, C = setup$state_C,
simtime = as.integer(setup$simtime),
tr_info = info
), file = nameout)
SaveRObj(x = list(
T = state_T,
C = state_C,
simtime = as.integer(setup$simulation_time),
totaltime = as.integer(totaltime),
ai_surrogate_info = ai_surrogate_info
), path = nameout)
msgm("results stored in <", nameout, ">")
}
}
msgm("done iteration", iter, "/", setup$maxiter)
## Add last time step to simulation time
setup$simulation_time <- setup$simulation_time + setup$timesteps[iter]
## msgm("done iteration", iter, "/", length(setup$timesteps))
setup$iter <- setup$iter + 1
return(setup)
}
## function for the workers to compute chemistry through PHREEQC
## Worker-side chemistry step: run PHREEQC on one work package.
## @param setup list with PHREEQC configuration (base script, species/prop
##   names, minerals, kinetics, annotation column, timesteps, reduce flag)
## @param data the work package rows (NOT the global field)
## @return the PHREEQC result restricted to the columns in setup$prop
slave_chemistry <- function(setup, data) {
base <- setup$base
first <- setup$first
prop <- setup$prop
immobile <- setup$immobile
kin <- setup$kin
ann <- setup$ann
iter <- setup$iter
timesteps <- setup$timesteps
## time step length for the current iteration
dt <- timesteps[iter]
state_T <- data ## not the global field, but the work-package
## treat special H+/pH, e-/pe cases
state_T <- RedModRphree::Act2pH(state_T)
## reduction of the problem: collapse duplicate rows before running PHREEQC
if (setup$reduce) {
reduced <- ReduceStateOmit(state_T, omit = setup$ann)
} else {
reduced <- state_T
}
## form the PHREEQC input script for the current work package
inplist <- SplitMultiKin(
data = reduced, procs = 1, base = base, first = first,
ann = ann, prop = prop, minerals = immobile, kin = kin, dt = dt
)
## if (local_rank==1 & iter==1)
## RPhreeWriteInp("FirstInp", inplist)
tmpC <- RunPQC(inplist, procs = 1, second = TRUE)
## recompose after the reduction (expand unique rows back to full package)
if (setup$reduce) {
state_C <- RecomposeState(tmpC, reduced)
} else {
state_C <- tmpC
}
## the next line is needed since we don't need all columns of
## PHREEQC output
return(state_C[, prop])
}
## This function, called by master
## Master-side chemistry step: take the results gathered from the workers
## and store them as setup$state_C (recomposing the reduction if enabled).
## @param setup simulation state list (reads state_T, reduce, ann, iter)
## @param data concatenated worker results
## @return setup with $state_C and $reduced updated
master_chemistry <- function(setup, data) {
state_T <- setup$state_T
msgm(" chemistry iteration", setup$iter)
## treat special H+/pH, e-/pe cases
state_T <- RedModRphree::Act2pH(state_T)
## reduction of the problem
if (setup$reduce) {
reduced <- ReduceStateOmit(state_T, omit = setup$ann)
} else {
reduced <- state_T
}
## inject data from workers
res_C <- data
rownames(res_C) <- NULL
## print(res_C)
## NOTE(review): when workers returned more rows than the reduced field,
## every second row is kept — presumably the results arrive as pairs
## (e.g. first/second PHREEQC output per cell); confirm against the
## worker-side packing before changing this.
if (nrow(res_C) > nrow(reduced)) {
res_C <- res_C[seq(2, nrow(res_C), by = 2), ]
}
## recompose after the reduction
if (setup$reduce) {
state_C <- RecomposeState(res_C, reduced)
} else {
state_C <- res_C
}
setup$state_C <- state_C
setup$reduced <- reduced
return(setup)
}
## Adapted version for "reduction"
## Adapted version for "reduction": collapse the field to its unique rows
## (after rounding to `sign` significant digits), optionally omitting the
## column(s) named in `omit` from the uniqueness test and reattaching them
## afterwards. The mapping from original rows to unique rows is stored in
## attr(result, "index") so RecomposeState() can expand results again.
## @param data data.frame/matrix of the field
## @param omit NULL, or a named list whose names are columns to omit
## @param sign significant digits used before computing unique rows
## @return reduced rows of `data` with an "index" attribute
ReduceStateOmit <- function(data, omit = NULL, sign = 6) {
require(mgcv)
## NOTE(review): `rem` is assigned but never used below
rem <- colnames(data)
if (is.list(omit)) {
indomi <- match(names(omit), colnames(data))
datao <- data[, -indomi]
} else {
datao <- data
}
## round before uniqueness so nearly-identical cells collapse together
datao <- signif(datao, sign)
red <- mgcv::uniquecombs(datao)
inds <- attr(red, "index")
now <- ncol(red)
## reattach the omitted column(s)
## FIXME: control if more than one ann is present
if (is.list(omit)) {
red <- cbind(red, rep(data[1, indomi], nrow(red)))
colnames(red)[now + 1] <- names(omit)
## restore the original column order
ret <- red[, colnames(data)]
} else {
ret <- red
}
rownames(ret) <- NULL
attr(ret, "index") <- inds
return(ret)
}
## Attach the name of the calling function to the message displayed on
## R's stdout
## Attach the name of the calling function to the message displayed on
## R's stdout.
## NOTE(review): this body looks like merge residue — on rank 0 the message
## is printed twice (once with the caller-name prefix inside the if-block,
## then again with the plain "R: " prefix); confirm which variant is wanted.
msgm <- function(...) {
if (local_rank == 0) {
fname <- as.list(sys.call(-1))[[1]]
prefix <- paste0("R: ", fname, " ::")
cat(paste(prefix, ..., "\n"))
}
prefix <- paste0("R: ")
cat(paste(prefix, ..., "\n"))
invisible()
}
## Function called by master R process to store on disk all relevant
## parameters for the simulation
## Function called by master R process to store on disk all relevant
## parameters for the simulation. Writes <fileout>/setup.rds containing the
## sourced input script, transport settings, DHT settings and the command
## line. Relies on the globals: filesim, fileout, dht_enabled, dht_log.
StoreSetup <- function(setup) {
to_store <- vector(mode = "list", length = 4)
## names(to_store) <- c("Sim", "Flow", "Transport", "Chemistry", "DHT")
names(to_store) <- c("Sim", "Transport", "DHT", "Cmdline")
## read the setup R file, which is sourced in kin.cpp
tmpbuff <- file(filesim, "r")
setupfile <- readLines(tmpbuff)
close.connection(tmpbuff)
to_store$Sim <- setupfile
## to_store$Flow <- list(
## snapshots = setup$snapshots,
## gridfile = setup$gridfile,
## phase = setup$phase,
## density = setup$density,
## dt_differ = setup$dt_differ,
## prolong = setup$prolong,
## maxiter = setup$maxiter,
## saved_iter = setup$iter_output,
## out_save = setup$out_save )
to_store$Transport <- setup$diffusion
## to_store$Chemistry <- list(
## nprocs = n_procs,
## wp_size = work_package_size,
## base = setup$base,
## first = setup$first,
## init = setup$initsim,
## db = db,
## kin = setup$kin,
## ann = setup$ann)
## DHT settings are only meaningful when the DHT is active
if (dht_enabled) {
to_store$DHT <- list(
enabled = dht_enabled,
log = dht_log
## signif = dht_final_signif,
## proptype = dht_final_proptype
)
} else {
to_store$DHT <- FALSE
}
to_store$Cmdline <- commandArgs(trailingOnly=FALSE)
saveRDS(to_store, file = paste0(fileout, "/setup.rds"))
msgm("initialization stored in ", paste0(fileout, "/setup.rds"))
}
## Compute the actual size of each work package when a field of `len` cells
## is split into `n_packages` packages of nominal size `package_size`.
## @param n_packages number of work packages
## @param package_size nominal size of one package
## @param len total number of cells to distribute
## @return integer vector of per-package sizes
GetWorkPackageSizesVector <- function(n_packages, package_size, len) {
## (a duplicated assignment left over from a merge was removed here)
ids <- rep(1:n_packages, times = package_size, each = 1)[1:len]
return(as.integer(table(ids)))
}
## Handler to read R objs from binary files using either builtin
## readRDS(), qs::qread() or qs2::qs_read() based on file extension
## Handler to read R objs from binary files using either builtin
## readRDS(), qs::qread() or qs2::qs_read() based on file extension.
## Fails loudly on an unsupported extension instead of silently returning
## NULL (switch() without a default returns invisible NULL).
ReadRObj <- function(path) {
## code borrowed from tools::file_ext()
pos <- regexpr("\\.([[:alnum:]]+)$", path)
extension <- ifelse(pos > -1L, substring(path, pos + 1L), "")
switch(extension,
rds = readRDS(path),
qs = qs::qread(path),
qs2 = qs2::qs_read(path),
stop("ReadRObj: unsupported file extension '", extension, "' for ", path)
)
}
## Handler to store R objs to binary files using either builtin
## saveRDS() or qs::qsave() based on file extension
## Handler to store R objs to binary files using either builtin
## saveRDS(), qs::qsave() or qs2::qs_save() based on file extension.
## Fails loudly on an unsupported extension instead of silently writing
## nothing (switch() without a default is a no-op).
SaveRObj <- function(x, path) {
## msgm("Storing to", path)
## code borrowed from tools::file_ext()
pos <- regexpr("\\.([[:alnum:]]+)$", path)
extension <- ifelse(pos > -1L, substring(path, pos + 1L), "")
switch(extension,
rds = saveRDS(object = x, file = path),
qs = qs::qsave(x = x, file = path),
qs2 = qs2::qs_save(object = x, file = path),
stop("SaveRObj: unsupported file extension '", extension, "' for ", path)
)
}
######## Old relic code
## ## Function called by master R process to store on disk all relevant
## ## parameters for the simulation
## StoreSetup <- function(setup, filesim, out_dir) {
## to_store <- vector(mode = "list", length = 4)
## ## names(to_store) <- c("Sim", "Flow", "Transport", "Chemistry", "DHT")
## names(to_store) <- c("Sim", "Transport", "DHT", "Cmdline")
## ## read the setup R file, which is sourced in kin.cpp
## tmpbuff <- file(filesim, "r")
## setupfile <- readLines(tmpbuff)
## close.connection(tmpbuff)
## to_store$Sim <- setupfile
## ## to_store$Flow <- list(
## ## snapshots = setup$snapshots,
## ## gridfile = setup$gridfile,
## ## phase = setup$phase,
## ## density = setup$density,
## ## dt_differ = setup$dt_differ,
## ## prolong = setup$prolong,
## ## maxiter = setup$maxiter,
## ## saved_iter = setup$iter_output,
## ## out_save = setup$out_save )
## to_store$Transport <- setup$diffusion
## ## to_store$Chemistry <- list(
## ## nprocs = n_procs,
## ## wp_size = work_package_size,
## ## base = setup$base,
## ## first = setup$first,
## ## init = setup$initsim,
## ## db = db,
## ## kin = setup$kin,
## ## ann = setup$ann)
## if (dht_enabled) {
## to_store$DHT <- list(
## enabled = dht_enabled,
## log = dht_log
## ## signif = dht_final_signif,
## ## proptype = dht_final_proptype
## )
## } else {
## to_store$DHT <- FALSE
## }
## if (dht_enabled) {
## to_store$DHT <- list(
## enabled = dht_enabled,
## log = dht_log
## # signif = dht_final_signif,
## # proptype = dht_final_proptype
## )
## } else {
## to_store$DHT <- FALSE
## }
## saveRDS(to_store, file = paste0(fileout, "/setup.rds"))
## msgm("initialization stored in ", paste0(fileout, "/setup.rds"))
## }

View File

@ -1,7 +0,0 @@
configure_file(poet.h.in poet.h)
add_executable(poet poet.cpp)
target_include_directories(poet PUBLIC "${CMAKE_CURRENT_BINARY_DIR}")
target_link_libraries(poet PUBLIC poet_lib MPI::MPI_CXX)
install(TARGETS poet DESTINATION bin)

View File

@ -1,369 +0,0 @@
/*
** Copyright (C) 2018-2021 Alexander Lindemann, Max Luebke (University of
** Potsdam)
**
** Copyright (C) 2018-2022 Marco De Lucia, Max Luebke (GFZ Potsdam)
**
** POET is free software; you can redistribute it and/or modify it under the
** terms of the GNU General Public License as published by the Free Software
** Foundation; either version 2 of the License, or (at your option) any later
** version.
**
** POET is distributed in the hope that it will be useful, but WITHOUT ANY
** WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
** A PARTICULAR PURPOSE. See the GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along with
** this program; if not, write to the Free Software Foundation, Inc., 51
** Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <Rcpp.h>
#include <cstdint>
#include <cstdlib>
#include <poet/ChemistryModule.hpp>
#include <poet/DiffusionModule.hpp>
#include <poet/Grid.hpp>
#include <poet/Macros.hpp>
#include <poet/RInsidePOET.hpp>
#include <poet/SimParams.hpp>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>
#include <mpi.h>
#include <poet.h>
using namespace std;
using namespace poet;
using namespace Rcpp;
// Convert an Rcpp DataFrame into a column-name -> value hash map.
// NOTE(review): `double val = df[name.c_str()]` relies on Rcpp's implicit
// conversion of a whole column to a scalar — this assumes every column holds
// a single numeric value (one-row frame); confirm callers guarantee that.
poet::ChemistryModule::SingleCMap DFToHashMap(const Rcpp::DataFrame &df) {
std::unordered_map<std::string, double> out_map;
vector<string> col_names = Rcpp::as<vector<string>>(df.names());
for (const auto &name : col_names) {
double val = df[name.c_str()];
out_map.insert({name, val});
}
return out_map;
}
// HACK: this is a step back as the order and also the count of fields is
// predefined, but it will change in the future
// Push the transport and chemistry fields into the embedded R session as
// mysetup$state_T / mysetup$state_C data.frames. The column-major field
// vector is reshaped in R with matrix(..., nrow = RequestedVecSize) and the
// property names are attached via setNames().
void writeFieldsToR(RInside &R, const Field &trans, const Field &chem) {
R["TMP"] = Rcpp::wrap(trans.AsVector());
R["TMP_PROPS"] = Rcpp::wrap(trans.GetProps());
R.parseEval(std::string(
"mysetup$state_T <- setNames(data.frame(matrix(TMP, nrow=" +
std::to_string(trans.GetRequestedVecSize()) + ")), TMP_PROPS)"));
R["TMP"] = Rcpp::wrap(chem.AsVector());
R["TMP_PROPS"] = Rcpp::wrap(chem.GetProps());
R.parseEval(std::string(
"mysetup$state_C <- setNames(data.frame(matrix(TMP, nrow=" +
std::to_string(chem.GetRequestedVecSize()) + ")), TMP_PROPS)"));
}
// Apply the common PhreeqcRM-style configuration to a ChemistryModule:
// error handling, units, per-cell representative volume / porosity /
// saturation (all sized to wp_size), and finally the thermodynamic database.
// @param chem module to configure (master instance or worker)
// @param wp_size number of cells the module operates on
// @param database_path PHREEQC database file to load
void set_chem_parameters(poet::ChemistryModule &chem, uint32_t wp_size,
const std::string &database_path) {
chem.SetErrorHandlerMode(1);
chem.SetComponentH2O(false);
chem.SetRebalanceFraction(0.5);
chem.SetRebalanceByCell(true);
chem.UseSolutionDensityVolume(false);
chem.SetPartitionUZSolids(false);
// Set concentration units
// 1, mg/L; 2, mol/L; 3, kg/kgs
chem.SetUnitsSolution(2);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsPPassemblage(1);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsExchange(1);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsSurface(1);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsGasPhase(1);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsSSassemblage(1);
// 0, mol/L cell; 1, mol/L water; 2 mol/L rock
chem.SetUnitsKinetics(1);
// Set representative volume (1.0 per cell)
std::vector<double> rv;
rv.resize(wp_size, 1.0);
chem.SetRepresentativeVolume(rv);
// Set initial porosity (1 per cell)
std::vector<double> por;
por.resize(wp_size, 1);
chem.SetPorosity(por);
// Set initial saturation (fully saturated)
std::vector<double> sat;
sat.resize(wp_size, 1.0);
chem.SetSaturation(sat);
// Load database
chem.LoadDatabase(database_path);
}
// Master-rank simulation driver: couples diffusion and chemistry for
// mysetup$iterations steps, mirrors the fields into R after every step,
// and finally copies all timing/profiling counters into the R list
// `profiling`. Returns the wall-clock time spent in the loop.
inline double RunMasterLoop(SimParams &params, RInside &R,
const GridParams &g_params, uint32_t nxyz_master) {
DiffusionParams d_params{R};
DiffusionModule diffusion(d_params, g_params);
/* Iteration Count is dynamic, retrieving value from R (is only needed by
* master for the following loop) */
uint32_t maxiter = R.parseEval("mysetup$iterations");
double sim_time = .0;
ChemistryModule chem(nxyz_master, params.getNumParams().wp_size, maxiter,
params.getChemParams(), MPI_COMM_WORLD);
set_chem_parameters(chem, nxyz_master, params.getChemParams().database_path);
chem.RunInitFile(params.getChemParams().input_script);
// NOTE(review): init_df is computed but never used below — confirm whether
// the initial-condition map is still needed or is dead code.
poet::ChemistryModule::SingleCMap init_df = DFToHashMap(d_params.initial_t);
chem.initializeField(diffusion.getField());
if (params.getNumParams().print_progressbar) {
chem.setProgressBarPrintout(true);
}
/* SIMULATION LOOP */
double dSimTime{0};
for (uint32_t iter = 1; iter < maxiter + 1; iter++) {
double start_t = MPI_Wtime();
// NOTE(review): `tick` is unused in this loop body
uint32_t tick = 0;
// cout << "CPP: Evaluating next time step" << endl;
// R.parseEvalQ("mysetup <- master_iteration_setup(mysetup)");
// time step length is taken from the R-side setup, per iteration
double dt = Rcpp::as<double>(
R.parseEval("mysetup$timesteps[" + std::to_string(iter) + "]"));
// cout << "CPP: Next time step is " << dt << "[s]" << endl;
MSG("Next time step is " + std::to_string(dt) + " [s]");
/* displaying iteration number, with C++ and R iterator */
MSG("Going through iteration " + std::to_string(iter));
MSG("R's $iter: " +
std::to_string((uint32_t)(R.parseEval("mysetup$iter"))) +
". Iteration");
/* run transport */
// TODO: transport to diffusion
diffusion.simulate(dt);
// hand the transported field to chemistry, react, then copy back
chem.getField().update(diffusion.getField());
MSG("Chemistry step");
chem.SetTimeStep(dt);
chem.RunCells();
writeFieldsToR(R, diffusion.getField(), chem.GetField());
diffusion.getField().update(chem.GetField());
R["req_dt"] = dt;
R["simtime"] = (sim_time += dt);
R.parseEval("mysetup$req_dt <- req_dt");
R.parseEval("mysetup$simtime <- simtime");
// MDL master_iteration_end just writes on disk state_T and
// state_C after every iteration if the cmdline option
// --ignore-results is not given (and thus the R variable
// store_result is TRUE)
R.parseEvalQ("mysetup <- master_iteration_end(setup=mysetup)");
MSG("End of *coupling* iteration " + std::to_string(iter) + "/" +
std::to_string(maxiter));
MSG();
// MPI_Barrier(MPI_COMM_WORLD);
double end_t = MPI_Wtime();
dSimTime += end_t - start_t;
} // END SIMULATION LOOP
// Export profiling counters to the R session
R.parseEvalQ("profiling <- list()");
R["simtime_chemistry"] = chem.GetChemistryTime();
R.parseEvalQ("profiling$simtime_chemistry <- simtime_chemistry");
R["chemistry_loop"] = chem.GetMasterLoopTime();
R.parseEvalQ("profiling$chemistry_loop <- chemistry_loop");
R["chemistry_sequential"] = chem.GetMasterSequentialTime();
R.parseEvalQ("profiling$simtime_sequential <- chemistry_sequential");
R["idle_master"] = chem.GetMasterIdleTime();
R.parseEvalQ("profiling$idle_master <- idle_master");
R["idle_worker"] = Rcpp::wrap(chem.GetWorkerIdleTimings());
R.parseEvalQ("profiling$idle_worker <- idle_worker");
R["phreeqc_time"] = Rcpp::wrap(chem.GetWorkerPhreeqcTimings());
R.parseEvalQ("profiling$phreeqc <- phreeqc_time");
R["simtime_transport"] = diffusion.getTransportTime();
R.parseEvalQ("profiling$simtime_transport <- simtime_transport");
// R["phreeqc_count"] = phreeqc_counts;
// R.parseEvalQ("profiling$phreeqc_count <- phreeqc_count");
// DHT statistics only exist when the distributed hash table is enabled
if (params.getChemParams().use_dht) {
R["dht_hits"] = Rcpp::wrap(chem.GetWorkerDHTHits());
R.parseEvalQ("profiling$dht_hits <- dht_hits");
R["dht_evictions"] = Rcpp::wrap(chem.GetWorkerDHTEvictions());
R.parseEvalQ("profiling$dht_evictions <- dht_evictions");
R["dht_get_time"] = Rcpp::wrap(chem.GetWorkerDHTGetTimings());
R.parseEvalQ("profiling$dht_get_time <- dht_get_time");
R["dht_fill_time"] = Rcpp::wrap(chem.GetWorkerDHTFillTimings());
R.parseEvalQ("profiling$dht_fill_time <- dht_fill_time");
}
// interpolation statistics only exist when interpolation is enabled
if (params.getChemParams().use_interp) {
R["interp_w"] = Rcpp::wrap(chem.GetWorkerInterpolationWriteTimings());
R.parseEvalQ("profiling$interp_write <- interp_w");
R["interp_r"] = Rcpp::wrap(chem.GetWorkerInterpolationReadTimings());
R.parseEvalQ("profiling$interp_read <- interp_r");
R["interp_g"] = Rcpp::wrap(chem.GetWorkerInterpolationGatherTimings());
R.parseEvalQ("profiling$interp_gather <- interp_g");
R["interp_fc"] =
Rcpp::wrap(chem.GetWorkerInterpolationFunctionCallTimings());
R.parseEvalQ("profiling$interp_function_calls <- interp_fc");
R["interp_calls"] = Rcpp::wrap(chem.GetWorkerInterpolationCalls());
R.parseEvalQ("profiling$interp_calls <- interp_calls");
R["interp_cached"] = Rcpp::wrap(chem.GetWorkerPHTCacheHits());
R.parseEvalQ("profiling$interp_cached <- interp_cached");
}
// release the workers from their loop and shut down transport
chem.MasterLoopBreak();
diffusion.end();
return dSimTime;
}
// POET entry point. Rank 0 acts as the master (R setup, transport+chemistry
// loop, profiling output); all other ranks immediately enter the chemistry
// worker loop and return before the master-only code below.
int main(int argc, char *argv[]) {
// NOTE(review): sim_end is declared but never used
double dSimTime, sim_end;
int world_size, world_rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
RInsidePOET &R = RInsidePOET::getInstance();
if (world_rank == 0) {
MSG("Running POET version " + std::string(poet_version));
}
// ----- worker ranks: run the chemistry worker loop and exit -----
if (world_rank > 0) {
{
SimParams params(world_rank, world_size);
int pret = params.parseFromCmdl(argv, R);
if (pret == poet::PARSER_ERROR) {
MPI_Finalize();
return EXIT_FAILURE;
} else if (pret == poet::PARSER_HELP) {
MPI_Finalize();
return EXIT_SUCCESS;
}
// ChemistryModule worker(nxyz, nxyz, MPI_COMM_WORLD);
ChemistryModule worker = poet::ChemistryModule::createWorker(
MPI_COMM_WORLD, params.getChemParams());
set_chem_parameters(worker, worker.GetWPSize(),
params.getChemParams().database_path);
worker.WorkerLoop();
}
MPI_Barrier(MPI_COMM_WORLD);
MSG("finished, cleanup of process " + std::to_string(world_rank));
MPI_Finalize();
return EXIT_SUCCESS;
}
// ----- master rank (0) from here on -----
/*Loading Dependencies*/
// TODO: can be removed
std::string r_load_dependencies = "source('../R_lib/kin_r_library.R');";
R.parseEvalQ(r_load_dependencies);
SimParams params(world_rank, world_size);
int pret = params.parseFromCmdl(argv, R);
if (pret == poet::PARSER_ERROR) {
MPI_Finalize();
return EXIT_FAILURE;
} else if (pret == poet::PARSER_HELP) {
MPI_Finalize();
return EXIT_SUCCESS;
}
MSG("RInside initialized on process " + std::to_string(world_rank));
R.parseEvalQ("mysetup <- setup");
// if (world_rank == 0) { // get timestep vector from
// grid_init function ... //
// initialize the R-side setup (output dir, iteration bookkeeping)
std::string master_init_code = "mysetup <- master_init(setup=setup)";
R.parseEval(master_init_code);
GridParams g_params(R);
params.initVectorParams(R);
// MDL: store all parameters
if (world_rank == 0) {
MSG("Calling R Function to store calling parameters");
R.parseEvalQ("StoreSetup(setup=mysetup)");
}
if (world_rank == 0) {
MSG("Init done on process with rank " + std::to_string(world_rank));
}
// MPI_Barrier(MPI_COMM_WORLD);
// with a single process the master computes all cells itself
uint32_t nxyz_master = (world_size == 1 ? g_params.total_n : 1);
dSimTime = RunMasterLoop(params, R, g_params, nxyz_master);
MSG("finished simulation loop");
MSG("start timing profiling");
R["simtime"] = dSimTime;
R.parseEvalQ("profiling$simtime <- simtime");
string r_vis_code;
r_vis_code = "saveRDS(profiling, file=paste0(fileout,'/timings.rds'));";
R.parseEval(r_vis_code);
MSG("Done! Results are stored as R objects into <" + params.getOutDir() +
"/timings.rds>");
MPI_Barrier(MPI_COMM_WORLD);
MSG("finished, cleanup of process " + std::to_string(world_rank));
MPI_Finalize();
if (world_rank == 0) {
MSG("done, bye!");
}
// NOTE(review): exit(0) after MPI_Finalize skips destructors of R and
// params; `return EXIT_SUCCESS;` would be the cleaner ending.
exit(0);
}

View File

@ -1,11 +0,0 @@
#ifndef POET_H
#define POET_H
#include "poet/ChemistryModule.hpp"
#include <Rcpp.h>
const char *poet_version = "@POET_VERSION@";
const char *CHEMISTRY_MODULE_NAME = "state_C";
#endif // POET_H

4
apps/CMakeLists.txt Normal file
View File

@ -0,0 +1,4 @@
file(GLOB INIT_SRCS CONFIGURE_DEPENDS "initializer/*.cpp")
add_executable(poet_initializer ${INIT_SRCS})
target_link_libraries(poet_initializer RRuntime tug)

View File

@ -0,0 +1,3 @@
#include <Rcpp.h>
int main(int argc, char **argv) {}

View File

@ -1,3 +1,43 @@
# ADD_BENCH_TARGET(TARGET POET_BENCH_LIST RT_FILES OUT_PATH)
#   For every benchmark script listed in the variable named POET_BENCH_LIST,
#   run poet_init at build time to produce <name>.qs2 in the build tree,
#   collect the outputs under the custom target TARGET, and install them —
#   together with the runtime files listed in the variable named RT_FILES —
#   into share/poet/<OUT_PATH>. Note: list arguments are passed by NAME,
#   hence the double dereference ${${POET_BENCH_LIST}} below.
function(ADD_BENCH_TARGET TARGET POET_BENCH_LIST RT_FILES OUT_PATH)
set(bench_install_dir share/poet/${OUT_PATH})
# create empty list
set(OUT_FILES_LIST "")
foreach(BENCH_FILE ${${POET_BENCH_LIST}})
get_filename_component(BENCH_NAME ${BENCH_FILE} NAME_WE)
set(OUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/${BENCH_NAME})
set(OUT_FILE_EXT ${OUT_FILE}.qs2)
# generate the serialized benchmark state with poet_init
add_custom_command(
OUTPUT ${OUT_FILE_EXT}
COMMAND $<TARGET_FILE:poet_init> -n ${OUT_FILE} -s ${CMAKE_CURRENT_SOURCE_DIR}/${BENCH_FILE}
COMMENT "Running poet_init on ${BENCH_FILE}"
DEPENDS poet_init ${CMAKE_CURRENT_SOURCE_DIR}/${BENCH_FILE}
VERBATIM
COMMAND_EXPAND_LISTS
)
list(APPEND OUT_FILES_LIST ${OUT_FILE_EXT})
endforeach(BENCH_FILE ${${POET_BENCH_LIST}})
add_custom_target(
${TARGET}
DEPENDS ${OUT_FILES_LIST})
install(FILES ${OUT_FILES_LIST} DESTINATION ${bench_install_dir})
# install all ADD_FILES to the same location
install(FILES ${${RT_FILES}} DESTINATION ${bench_install_dir})
endfunction()
# define target name
set(BENCHTARGET benchmarks)
add_custom_target(${BENCHTARGET} ALL)
# Each benchmark directory must be added exactly once; the duplicate
# add_subdirectory(barite) left over from a merge would make CMake fail
# with "The binary directory ... is already used".
add_subdirectory(barite)
add_subdirectory(dolo)
add_subdirectory(surfex)

View File

@ -1,8 +1,20 @@
install(FILES
barite.R
barite_interp_eval.R
barite.pqi
db_barite.dat
DESTINATION
share/poet/bench/barite
# Create a list of files
set(bench_files
barite_200.R
barite_het.R
)
set(runtime_files
barite_200_rt.R
barite_het_rt.R
)
# add_custom_target(barite_bench DEPENDS ${bench_files} ${runtime_files})
ADD_BENCH_TARGET(barite_bench
bench_files
runtime_files
"barite"
)
add_dependencies(${BENCHTARGET} barite_bench)

View File

@ -1,4 +1,4 @@
#+TITLE: Description of \texttt{barite} benchmark
#+TITLE: Description of =barite= benchmark
#+AUTHOR: MDL <delucia@gfz-potsdam.de>
#+DATE: 2023-08-26
#+STARTUP: inlineimages
@ -13,34 +13,122 @@
#+begin_src sh :language sh :frame single
mpirun -np 4 ./poet barite.R barite_results
mpirun -np 4 ./poet --interp barite_interp_eval.R barite_results
#+end_src
* Chemical system
The benchmark accounts for reaction kinetics for celestite dissolution
and barite precipitation. The system is initially at equilibrium with
celestite; following diffusion of $BaCl_2$ celestite dissolution
occurs. Dissolution of celestite and the subsequent release of
$SO_4^{2-}$ into solution causes barite to precipitate:
\[ \mathrm{Ba}^{2+}_{\mathrm{(aq)}} + \mathrm{SrSO}_{4, \mathrm{(s)}} \rightarrow \mathrm{BaSO}_{4,\mathrm{(s)}} + \mathrm{Sr}^{2+}_{\mathrm{(aq)}} \]
Reaction rates are calculated using a general kinetics rate law for
both dissolution and precipitation based on transition state
theory:
\[ \frac{\mathrm{d}m_{m}}{\mathrm{d}t} = -\mathrm{SA}_m k_{\mathrm{r},m} (1-\mathrm{SR}_{m}) \]
where $\mathrm{d}m\,(\mathrm{mol/s})$ is the rate of a mineral phase
$m$, $\mathrm{SA}\,\mathrm{(m^2)}$ is the reactive surface area,
$k_{\mathrm{r}}\,\mathrm{(mol/m^2/s)}$ is the rate constant, and
$\mathrm{SR}\, {(\text{--})}$ is the saturation ratio, i.e., the ratio
of the ion activity product of the reacting species and the solubility
constant.
* List of Files
- =barite_het.R=: POET input script with homogeneous zones for a 5x2 simulation
grid
- =barite_200.R=: POET input script for a 200x200 simulation
grid
- =barite_200ai_surrogate_input_script.R=: Defines the ai surrogate functions
to load a pretrained model and apply min-max-feature scaling on the model inputs
and target. Prediction validity is assessed with a threshold of 3e-5 on the mass
balance of Ba and Sr.
- =barite_200min_max_bounds=: Minimum and maximum values from 50 iterations of the
barite_200 benchmark. Used for feature scaling in the ai surrogate.
- =barite_200model_min_max.keras=: A sequential keras model that has been trained
on 50 iterations of the barite_200 benchmark with min-max-scaled inputs
and targets/outputs.
- =db_barite.dat=: PHREEQC database containing the kinetic expressions
for barite and celestite, stripped down from =phreeqc.dat=
- =barite.pqi=: PHREEQC input script defining the chemical system
* Chemical system
The benchmark depicts an isotherm porous system at *25 °C* where pure
water is initially at equilibrium with *celestite* (strontium sulfate;
brute formula: SrSO_{4}). A solution containing only dissolved Ba^{2+}
and Cl^- diffuses into the system causing celestite dissolution. The
increased concentration of dissolved sulfate SO_{4}^{2-} induces
precipitation of *barite* (barium sulfate; brute formula:
BaSO_{4}). The overall reaction can be written:
Ba^{2+} + celestite \rightarrow barite + Sr^{2+}
Both celestite dissolution and barite precipitation are calculated
using a kinetics rate law based on transition state theory:
rate = -S_{m} k_{m} (1-SR_{m})
where the reaction rate has units mol/s, S_{m} (m^{2}) is the reactive
surface area, k (mol/m^{2}/s) is the kinetic coefficient, and SR is
the saturation ratio, i.e., the ratio of the ion activity product of
the reacting species and the solubility constant, calculated
internally by PHREEQC from the speciated solution.
For barite, the reaction rate is computed as sum of two mechanisms,
k_{/acid/} and k_{/neutral/}:
rate_{barite} = S_{barite} (k_{/acid/} + k_{/neutral/}) * (1 - SR_{barite})
where:
k_{/acid/} = 10^{-6.9} e^{-30800 / R} \cdot act(H^{+})^{0.22}
k_{/neutral/} = 10^{-7.9} e^{-30800 / R}
R (8.314462 J K^{-1} mol^{-1}) is the gas constant.
For celestite the kinetic law considers only the acidic mechanism and
reads:
rate_{celestite} = S_{celestite} 10^{-5.66} e^{-23800 / R} \cdot
act(H^{+})^{0.109} \cdot (1 - SR_{celestite})
The kinetic rates as implemented in the =db_barite.dat= file accept
one parameter which represents reactive surface area in m^{2}. For the
benchmarks the surface areas are set to
- S_{barite}: 50 m^{2}
- S_{celestite}: 10 m^{2}
A starting seed for barite is given at 0.001 mol in each domain
element.
* Nucleation (TODO)
Geochemical benchmark inspired by Tranter et al. (2021) without
nucleation.
* POET simulations
Currently these benchmarks are pure diffusion simulations. There are 7
transported species: H, O, Charge, Ba, Cl, S(6), Sr.
** =barite.R=
- Grid discretization: square domain of 1 \cdot 1 m^{2} discretized in
20x20 cells
- Boundary conditions: E, S and W sides of the domain are closed; the
N boundary has a *fixed concentration* (Dirichlet) of 0.1 molal
BaCl_{2}
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 20 iteration with \Delta t = 250 s
- *DHT* parameters:
| H | O | Charge | Ba | Cl | S(6) | Sr |
| 10 | 10 | 3 | 5 | 5 | 5 | 5 |
** =barite_interp_eval.R=
- Grid discretization: rectangular domain of 40 \cdot 20 m^{2}
discretized in 400 \cdot 200 cells
- Boundary conditions: all boundaries are closed. The center of the
domain at indeces (200, 100) has fixed concentration of 0.1 molal of
BaCl_{2}
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 200 iterations with \Delta t = 250 s
- *DHT* parameters:
| H | O | Charge | Ba | Cl | S(6) | Sr |
| 10 | 10 | 3 | 5 | 5 | 5 | 5 |
* References
- Tranter, Wetzel, De Lucia and Kühn (2021): Reactive transport model
of kinetically controlled celestite to barite replacement, Advances
in Geosciences, 56, 57-65, 2021.
https://doi.org/10.5194/adgeo-56-57-2021

View File

@ -1,147 +0,0 @@
## Time-stamp: "Last modified 2023-08-02 13:59:22 mluebke"
database <- normalizePath("../share/poet/bench/barite/db_barite.dat")
input_script <- normalizePath("../share/poet/bench/barite/barite.pqi")
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
n <- 20
m <- 20
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(
"H" = 110.0124,
"O" = 55.5087,
"Charge" = -1.217E-09,
"Ba" = 1.E-10,
"Cl" = 2.E-10,
"S" = 6.205E-4,
"Sr" = 6.205E-4,
"Barite" = 0.001,
"Celestite" = 1
)
grid <- list(
n_cells = c(n, m),
s_cells = c(1, 1),
type = types[1]
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
## initial conditions
init_diffu <- list(
#"H" = 110.0124,
"H" = 0.00000028904,
#"O" = 55.5087,
"O" = 0.000000165205,
#"Charge" = -1.217E-09,
"Charge" = -3.337E-08,
"Ba" = 1.E-10,
"Cl" = 1.E-10,
"S(6)" = 6.205E-4,
"Sr" = 6.205E-4
)
injection_diff <- list(
list(
#"H" = 111.0124,
"H" = 0.0000002890408,
#"O" = 55.50622,
"O" = 0.00002014464,
#"Charge" = -3.337E-08,
"Charge" = -3.337000004885E-08,
"Ba" = 0.1,
"Cl" = 0.2,
"S(6)" = 0,
"Sr" = 0)
)
## diffusion coefficients
alpha_diffu <- c(
"H" = 1E-06,
"O" = 1E-06,
"Charge" = 1E-06,
"Ba" = 1E-06,
"Cl" = 1E-06,
"S(6)" = 1E-06,
"Sr" = 1E-06
)
## vecinj_inner <- list(
## l1 = c(1,20,20),
## l2 = c(2,80,80),
## l3 = c(2,60,80)
## )
boundary <- list(
"N" = rep(1, n),
## "N" = rep(0, n),
"E" = rep(0, n),
"S" = rep(0, n),
"W" = rep(0, n)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, injection_diff)
names(vecinj) <- names(init_diffu)
diffusion <- list(
init = as.data.frame(init_diffu, check.names = FALSE),
vecinj = vecinj,
# vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
## # Needed when using DHT
dht_species <- c(
"H" = 10,
"O" = 10,
"Charge" = 3,
"Ba" = 5,
"Cl" = 5,
"S(6)" = 5,
"Sr" = 5
)
chemistry <- list(
database = database,
input_script = input_script,
dht_species = dht_species
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 4
dt <- 100
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = seq(1, iterations)
)

View File

@ -1,25 +1,32 @@
SELECTED_OUTPUT
-high_precision true
-reset false
-kinetic_reactants Barite Celestite
-saturation_indices Barite Celestite
SOLUTION 1
units mol/kgw
water 1
temperature 25
pH 7
pe 10.799
Ba 0.1
Cl 0.2
S 1e-9
Sr 1e-9
KINETICS 1
Barite
-m 0.001
-parms 50. # reactive surface area
-tol 1e-9
Celestite
-m 1
-parms 10.0 # reactive surface area
-tol 1e-9
units mol/kgw
water 1
temperature 25
pH 7
PURE 1
Celestite 0.0 1
END
RUN_CELLS
-cells 1
COPY solution 1 2
KINETICS 2
Barite
-m 0.001
-parms 50. # reactive surface area
-tol 1e-9
Celestite
-m 1
-parms 10.0 # reactive surface area
-tol 1e-9
END
SOLUTION 3
units mol/kgw
water 1
temperature 25
Ba 0.1
Cl 0.2
END

59
bench/barite/barite_200.R Normal file
View File

@ -0,0 +1,59 @@
cols <- 200
rows <- 200
s_cols <- 1
s_rows <- 1
grid_def <- matrix(2, nrow = rows, ncol = cols)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./barite.pqi",
pqc_db_file = "./db_barite.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(s_rows, s_cols), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
bound_length <- 2
bound_def <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(3, bound_length),
"cell" = seq(1, bound_length)
)
homogenous_alpha <- 1e-6
diffusion_setup <- list(
boundaries = list(
"W" = bound_def,
"N" = bound_def
),
alpha_x = homogenous_alpha,
alpha_y = homogenous_alpha
)
dht_species <- c(
"H" = 3,
"O" = 3,
"Charge" = 6,
"Ba" = 6,
"Cl" = 6,
"S" = 6,
"Sr" = 6,
"Barite" = 5,
"Celestite" = 5
)
chemistry_setup <- list(
dht_species = dht_species,
ai_surrogate_input_script = "./barite_200ai_surrogate_input_script.R"
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup
)

View File

@ -0,0 +1,7 @@
iterations <- 50
dt <- 100
list(
timesteps = rep(dt, iterations),
store_result = TRUE
)

View File

@ -0,0 +1,48 @@
## load a pretrained model from tensorflow file
## Use the global variable "ai_surrogate_base_path" when using file paths
## relative to the input script
## Load the pretrained Keras/TensorFlow surrogate model from disk.
## The file is resolved relative to the POET-provided global
## `ai_surrogate_base_path`.
initiate_model <- function() {
  model_path <- paste0(ai_surrogate_base_path,
                       "model_min_max_float64.keras")
  load_model_tf(normalizePath(model_path))
}
## Min-max feature scaling. With backtransform = FALSE, map `x` from
## [min, max] onto [0, 1]; with backtransform = TRUE, invert the
## mapping (scaled value back to the original range). Vectorized in `x`.
scale_min_max <- function(x, min, max, backtransform) {
  span <- max - min
  if (backtransform) {
    return(x * span + min)
  }
  (x - min) / span
}
## Min-max scale every column of `df` using the global bounds stored in
## "min_max_bounds.rds" (located next to this input script via the
## POET-provided global `ai_surrogate_base_path`).
## `backtransform = TRUE` inverts the scaling (scaled -> physical units).
## NOTE(review): the `outputs` argument is currently unused — confirm
## whether model outputs were meant to use separate bounds.
preprocess <- function(df, backtransform = FALSE, outputs = FALSE) {
minmax_file <- normalizePath(paste0(ai_surrogate_base_path,
"min_max_bounds.rds"))
## Bounds object provides $min and $max, indexed by column name.
## Note: re-read from disk on every call.
global_minmax <- readRDS(minmax_file)
for (column in colnames(df)) {
df[column] <- lapply(df[column],
scale_min_max,
global_minmax$min[column],
global_minmax$max[column],
backtransform)
}
return(df)
}
## Combined absolute mass-balance error for Ba and Sr between the model
## inputs (`predictors`) and the surrogate outputs (`prediction`).
## Both data frames must carry the columns Ba, Barite, Sr, Celestite.
## Returns one non-negative value per row.
mass_balance <- function(predictors, prediction) {
  ba_error <- abs(prediction$Ba + prediction$Barite -
                  predictors$Ba - predictors$Barite)
  sr_error <- abs(prediction$Sr + prediction$Celestite -
                  predictors$Sr - predictors$Celestite)
  ba_error + sr_error
}
## Per-row validity mask for surrogate predictions: a row is accepted
## when its combined Ba + Sr mass-balance error stays below `epsilon`.
## Summary statistics are logged via POET's `msgm` logger.
validate_predictions <- function(predictors, prediction) {
## Acceptance threshold on the mass-balance error (mol).
epsilon <- 3e-5
mb <- mass_balance(predictors, prediction)
msgm("Mass balance mean:", mean(mb))
msgm("Mass balance variance:", var(mb))
msgm("Rows where mass balance meets threshold", epsilon, ":",
sum(mb < epsilon))
return(mb < epsilon)
}

View File

@ -0,0 +1,60 @@
## Time-stamp: "Last modified 2024-05-30 13:34:14 delucia"
cols <- 50
rows <- 50
s_cols <- 0.25
s_rows <- 0.25
grid_def <- matrix(2, nrow = rows, ncol = cols)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./barite.pqi",
pqc_db_file = "./db_barite.dat", ## Path to the database file for Phreeqc
grid_def = grid_def, ## Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(s_rows, s_cols), ## Size of the grid in meters
constant_cells = c() ## IDs of cells with constant concentration
)
bound_length <- 2
bound_def <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(3, bound_length),
"cell" = seq(1, bound_length)
)
homogenous_alpha <- 1e-8
diffusion_setup <- list(
boundaries = list(
"W" = bound_def,
"N" = bound_def
),
alpha_x = homogenous_alpha,
alpha_y = homogenous_alpha
)
dht_species <- c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"Ba" = 6,
"Cl" = 6,
"S" = 6,
"Sr" = 6,
"Barite" = 5,
"Celestite" = 5
)
chemistry_setup <- list(
dht_species = dht_species,
ai_surrogate_input_script = "./barite_50ai_surr_mdl.R"
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup
)

Binary file not shown.

View File

@ -0,0 +1,9 @@
iterations <- 1000
dt <- 200
list(
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = c(1, 5, seq(20, iterations, by=20))
)

View File

@ -0,0 +1,90 @@
## Time-stamp: "Last modified 2024-05-30 13:27:06 delucia"
## load a pretrained model from tensorflow file
## Use the global variable "ai_surrogate_base_path" when using file paths
## relative to the input script
## Load the pretrained keras3 surrogate model and log its structure.
## The file is resolved relative to the POET-provided global
## `ai_surrogate_base_path`. Returns the keras3 model object.
initiate_model <- function() {
require(keras3)
require(tensorflow)
init_model <- normalizePath(paste0(ai_surrogate_base_path,
"barite_50ai_all.keras"))
Model <- keras3::load_model(init_model)
msgm("Loaded model:")
print(str(Model))
return(Model)
}
## Forward (backtransform = FALSE) or inverse (backtransform = TRUE)
## min-max scaling of `x` against the bounds [min, max].
scale_min_max <- function(x, min, max, backtransform) {
  if (!backtransform) {
    (x - min) / (max - min)
  } else {
    x * (max - min) + min
  }
}
## Hard-coded per-species min/max bounds (harvested from a run of the
## barite_50 benchmark); used by preprocess()/postprocess() for
## min-max feature scaling of model inputs and outputs.
minmax <- list(min = c(H = 111.012433592824, O = 55.5062185549492, Charge = -3.1028354471876e-08,
Ba = 1.87312878574393e-141, Cl = 0, `S(6)` = 4.24227510643685e-07,
Sr = 0.00049382996130541, Barite = 0.000999542409828586, Celestite = 0.244801877115968),
max = c(H = 111.012433679682, O = 55.5087003521685, Charge = 5.27666636082035e-07,
Ba = 0.0908849779513762, Cl = 0.195697626449355, `S(6)` = 0.000620774752665846,
Sr = 0.0558680070692722, Barite = 0.756779139057097, Celestite = 1.00075422160624
))
## Scale every column of `df` (physical units -> [0, 1]) with the
## hard-coded `minmax` bounds defined above; matrix-like input is
## coerced to a data frame first. Inverse operation: postprocess().
preprocess <- function(df) {
if (!is.data.frame(df))
df <- as.data.frame(df, check.names = FALSE)
as.data.frame(lapply(colnames(df),
function(x) scale_min_max(x=df[x],
min=minmax$min[x],
max=minmax$max[x],
backtransform=FALSE)),
check.names = FALSE)
}
## Back-transform scaled model outputs ([0, 1] -> physical units) using
## the hard-coded `minmax` bounds; inverse of preprocess().
postprocess <- function(df) {
  if (!is.data.frame(df)) {
    df <- as.data.frame(df, check.names = FALSE)
  }
  rescaled <- lapply(colnames(df), function(col) {
    scale_min_max(x = df[col],
                  min = minmax$min[col],
                  max = minmax$max[col],
                  backtransform = TRUE)
  })
  as.data.frame(rescaled, check.names = FALSE)
}
## Combined absolute Ba + Sr mass-balance error between model inputs
## (`predictors`) and surrogate outputs (`prediction`). Each element is
## paired with its mineral sink (Ba/Barite, Sr/Celestite); returns one
## non-negative value per row.
mass_balance <- function(predictors, prediction) {
  pair_error <- function(element, mineral) {
    abs(prediction[[element]] + prediction[[mineral]] -
        predictors[[element]] - predictors[[mineral]])
  }
  pair_error("Ba", "Barite") + pair_error("Sr", "Celestite")
}
## Per-row validity mask for surrogate predictions: a row is accepted
## when its combined Ba + Sr mass-balance error stays below `epsilon`.
## Summary statistics are logged via POET's `msgm` logger.
validate_predictions <- function(predictors, prediction) {
## Acceptance threshold on the mass-balance error (mol); stricter than
## the barite_200 variant of this script (3e-5).
epsilon <- 1E-7
mb <- mass_balance(predictors, prediction)
msgm("Mass balance mean:", mean(mb))
msgm("Mass balance variance:", var(mb))
ret <- mb < epsilon
msgm("Rows where mass balance meets threshold", epsilon, ":",
sum(ret))
return(ret)
}
## Incrementally fit the surrogate on the latest (predictor, target)
## batch (10 epochs) and checkpoint the updated model into the global
## `out_dir`. Returns the (mutated) model.
## NOTE(review): the `validity` argument is currently unused — confirm
## whether training was meant to be restricted to valid rows only.
training_step <- function(model, predictor, target, validity) {
msgm("Starting incremental training:")
## x <- as.matrix(predictor)
## y <- as.matrix(target[colnames(x)])
history <- model %>% keras3::fit(x = data.matrix(predictor),
y = data.matrix(target),
epochs = 10, verbose=1)
keras3::save_model(model,
filepath = paste0(out_dir, "/current_model.keras"),
overwrite=TRUE)
return(model)
}

32
bench/barite/barite_het.R Normal file
View File

@ -0,0 +1,32 @@
grid_def <- matrix(c(2, 3), nrow = 2, ncol = 5)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./barite_het.pqi",
pqc_db_file = "./db_barite.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(ncol(grid_def), nrow(grid_def)), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
diffusion_setup <- list(
boundaries = list(
"W" = list(
"type" = rep("constant", nrow(grid_def)),
"sol_id" = rep(4, nrow(grid_def)),
"cell" = seq_len(nrow(grid_def))
)
),
alpha_x = 1e-6,
alpha_y = matrix(runif(10, 1e-8, 1e-7),
nrow = nrow(grid_def),
ncol = ncol(grid_def)
)
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = list()
)

View File

@ -0,0 +1,80 @@
## Initial: everywhere equilibrium with Celestite NB: The aqueous
## solution *resulting* from this calculation is to be used as initial
## state everywhere in the domain
SOLUTION 1
units mol/kgw
water 1
temperature 25
pH 7
pe 4
S(6) 1e-12
Sr 1e-12
Ba 1e-12
Cl 1e-12
PURE 1
Celestite 0.0 1
SAVE SOLUTION 2 # <- phreeqc keyword to store and later reuse these results
END
RUN_CELLS
-cells 1
COPY solution 1 2-3
## Here a 5x2 domain:
|---+---+---+---+---|
-> | 2 | 2 | 2 | 2 | 2 |
4 |---+---+---+---+---|
-> | 3 | 3 | 3 | 3 | 3 |
|---+---+---+---+---|
## East boundary: "injection" of solution 4. North, W, S boundaries: closed
## Here the two distinct zones: nr 2 with kinetics Celestite (initial
## amount is 0, is then allowed to precipitate) and nr 3 with kinetic
## Celestite and Barite (both initially > 0) where the actual
## replacement takes place
#USE SOLUTION 2 <- PHREEQC keyword to reuse the results from previous calculation
KINETICS 2
Celestite
-m 0 # Allowed to precipitate
-parms 10.0
-tol 1e-9
END
#USE SOLUTION 2 <- PHREEQC keyword to reuse the results from previous calculation
KINETICS 3
Barite
-m 0.001
-parms 50.
-tol 1e-9
Celestite
-m 1
-parms 10.0
-tol 1e-9
END
## A BaCl2 solution (nr 4) is "injected" from the left boundary:
SOLUTION 4
units mol/kgw
pH 7
water 1
temp 25
Ba 0.1
Cl 0.2
END
## NB: again, the *result* of the SOLUTION 4 script defines the
## concentration of all elements (+charge, tot H, tot O)
## Ideally, in the initial state SOLUTION 1 we should not have to
## define the 4 elemental concentrations (S(6), Sr, Ba and Cl) but
## obtain them having run once the scripts with the aqueous solution
## resulting from SOLUTION 1 once with KINETICS 2 and once with
## KINETICS 3.
RUN_CELLS
-cells 2-4

View File

@ -0,0 +1,4 @@
list(
timesteps = rep(50, 100),
store_result = TRUE
)

View File

@ -1,151 +0,0 @@
## Time-stamp: "Last modified 2023-07-21 15:04:49 mluebke"
database <- normalizePath("../share/poet/bench/barite/db_barite.dat")
input_script <- normalizePath("../share/poet/bench/barite/barite.pqi")
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
n <- 400
m <- 200
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(
"H" = 110.0124,
"O" = 55.5087,
"Charge" = -1.217E-09,
"Ba" = 1.E-10,
"Cl" = 2.E-10,
"S" = 6.205E-4,
"Sr" = 6.205E-4,
"Barite" = 0.001,
"Celestite" = 1
)
grid <- list(
n_cells = c(n, m),
s_cells = c(n / 10, m / 10),
type = types[1],
init_cell = as.data.frame(init_cell, check.names = FALSE),
props = names(init_cell),
database = database,
input_script = input_script
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
## initial conditions
init_diffu <- list(
# "H" = 110.0124,
"H" = 0.00000028904,
# "O" = 55.5087,
"O" = 0.000000165205,
# "Charge" = -1.217E-09,
"Charge" = -3.337E-08,
"Ba" = 1.E-10,
"Cl" = 1.E-10,
"S(6)" = 6.205E-4,
"Sr" = 6.205E-4
)
injection_diff <- list(
list(
# "H" = 111.0124,
"H" = 0.0000002890408,
# "O" = 55.50622,
"O" = 0.00002014464,
# "Charge" = -3.337E-08,
"Charge" = -3.337000004885E-08,
"Ba" = 0.1,
"Cl" = 0.2,
"S(6)" = 0,
"Sr" = 0
)
)
## diffusion coefficients
alpha_diffu <- c(
"H" = 1E-06,
"O" = 1E-06,
"Charge" = 1E-06,
"Ba" = 1E-06,
"Cl" = 1E-06,
"S(6)" = 1E-06,
"Sr" = 1E-06
)
vecinj_inner <- list(
l1 = c(1, floor(n / 2), floor(m / 2))
## l2 = c(2,80,80),
## l3 = c(2,60,80)
)
boundary <- list(
# "N" = rep(1, n),
"N" = rep(0, n),
"E" = rep(0, n),
"S" = rep(0, n),
"W" = rep(0, n)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, injection_diff)
names(vecinj) <- names(init_diffu)
diffusion <- list(
init = as.data.frame(init_diffu, check.names = FALSE),
vecinj = vecinj,
vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
## # Needed when using DHT
dht_species <- c(
"H" = 10,
"O" = 10,
"Charge" = 3,
"Ba" = 5,
"Cl" = 5,
"S(6)" = 5,
"Sr" = 5
)
chemistry <- list(
database = database,
input_script = input_script,
dht_species = dht_species
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 200
dt <- 250
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = seq(1, iterations)
)

Binary file not shown.

Binary file not shown.

View File

@ -1,9 +1,18 @@
install(FILES
dolo_diffu_inner.R
dolo_diffu_inner_large.R
dolo_inner.pqi
dolo_interp_long.R
phreeqc_kin.dat
DESTINATION
share/poet/bench/dolo
set(bench_files
dolo_inner_large.R
dolo_interp.R
)
set(runtime_files
dolo_inner_large_rt.R
dolo_interp_rt.R
)
ADD_BENCH_TARGET(
dolo_bench
bench_files
runtime_files
"dolo"
)
add_dependencies(${BENCHTARGET} dolo_bench)

View File

@ -1,51 +0,0 @@
## Time-stamp: "Last modified 2022-12-16 20:26:03 delucia"
source("../../../util/data_evaluation/RFun_Eval.R")
sd <- ReadRTSims("naaice_2d")
sd <- ReadRTSims("Sim2D")
sd <- ReadRTSims("inner")
tim <- readRDS("inner/timings.rds")
simtimes <- sapply(sd, "[","simtime")
## workhorse function to be used with package "animation"
PlotAn <- function(tot, prop, grid, breaks) {
## Render one 2D frame per stored iteration in `tot`: plots column
## `prop` of the concentration data frame $C; simtime is converted
## from seconds to days for the plot title.
for (step in seq(1, length(tot))) {
snap <- tot[[step]]$C
time <- tot[[step]]$simtime/3600/24
ind <- match(prop, colnames(snap))
Plot2DCellData(snap[,ind], grid=grid, contour=FALSE, breaks=breaks, nlevels=length(breaks), scale=TRUE, main=paste0(prop," after ", time, "days"))
}
}
options(width=110)
library(viridis)
Plot2DCellData(sd$iter_050$C$Cl, nx=1/100, ny=1/100, contour = TRUE,
nlevels = 12, palette = "heat.colors",
rev.palette = TRUE, scale = TRUE, main="Cl")
Plot2DCellData(sd$iter_050$C$Dolomite, nx=100, ny=100, contour = FALSE,
nlevels = 12, palette = "heat.colors",
rev.palette = TRUE, scale = TRUE, )
cairo_pdf("naaice_inner_Dolo.pdf", width=8, height = 6, family="serif")
Plot2DCellData(sd$iter_100$C$Dolomite, nx=100, ny=100, contour = FALSE,
nlevels = 12, palette = "viridis",
rev.palette = TRUE, scale = TRUE, plot.axes = FALSE,
main="2D Diffusion - Dolomite after 2E+4 s (100 iterations)")
dev.off()
cairo_pdf("naaice_inner_Mg.pdf", width=8, height = 6, family="serif")
Plot2DCellData(sd$iter_100$C$Mg, nx=100, ny=100, contour = FALSE,
nlevels = 12, palette = "terrain.colors",
rev.palette = TRUE, scale = TRUE, plot.axes=FALSE,
main="2D Diffusion - Mg after 2E+4 s (100 iterations)")
dev.off()

159
bench/dolo/README.org Normal file
View File

@ -0,0 +1,159 @@
#+TITLE: Description of =dolo= benchmark
#+AUTHOR: MDL <delucia@gfz-potsdam.de>
#+DATE: 2023-08-26
#+STARTUP: inlineimages
#+LATEX_CLASS_OPTIONS: [a4paper,9pt]
#+LATEX_HEADER: \usepackage{fullpage}
#+LATEX_HEADER: \usepackage{amsmath, systeme}
#+LATEX_HEADER: \usepackage{graphicx}
#+LATEX_HEADER: \usepackage{charter}
#+OPTIONS: toc:nil
* Quick start
#+begin_src sh :language sh :frame single
mpirun -np 4 ./poet dolo_diffu_inner.R dolo_diffu_inner_res
mpirun -np 4 ./poet --dht --interp dolo_interp_long.R dolo_interp_long_res
#+end_src
* List of Files
- =dolo_interp.R=: POET input script for a 400x200 simulation
grid
- =dolo_diffu_inner_large.R=: POET input script for a 400x200
simulation grid
- =phreeqc_kin.dat=: PHREEQC database containing the kinetic expressions
for dolomite and celestite, stripped down from =phreeqc.dat=
- =dol.pqi=: PHREEQC input script for the chemical system
* Chemical system
This model describes a simplified version of /dolomitization/ of
calcite, itself a complex and not yet fully understood natural process
which is observed naturally at higher temperatures (Möller and De
Lucia, 2020). Variants of such model have been widely used in many
instances especially for the purpose of benchmarking numerical codes
(De Lucia et al., 2021 and references therein).
We consider an isothermal porous system at *25 °C* in which pure water
is initially at equilibrium with *calcite* (calcium carbonate; brute
formula: CaCO_{3}). An MgCl_{2} solution enters the system causing
calcite dissolution. The released excess concentration of dissolved
calcium Ca^{2+} and carbonate (CO_{3}^{2-}) induces after a while
supersaturation and hence precipitation of *dolomite*
(calcium-magnesium carbonate; brute formula: CaMg(CO_{3})_{2}). The
overall /dolomitization/ reaction can be written:
Mg^{2+} + 2 \cdot calcite \rightarrow dolomite + 2 \cdot Ca^{2+}
The precipitation of dolomite continues until enough Ca^{2+} is
present in solution. Further injection of MgCl_{2} changes its
saturation state causing its dissolution too. After enough time, the
whole system has depleted all minerals and the injected MgCl_{2}
solution fills up the domain.
Both calcite dissolution and dolomite precipitation/dissolution follow
a kinetics rate law based on transition state theory (Palandri and
Kharaka, 2004; De Lucia et al., 2021).
rate = -S_{m} k_{m} (1-SR_{m})
where the reaction rate has units mol/s, S_{m} (m^{2}) is the reactive
surface area, k_{m} (mol/m^{2}/s) is the kinetic coefficient, and SR
is the saturation ratio, i.e., the ratio of the ion activity product
of the reacting species and the solubility constant, calculated
internally by PHREEQC from the speciated solution.
For dolomite, the kinetic coefficient results from the sum of two
mechanisms, k_{/acid/} and k_{/neutral/}:
rate_{dolomite} = S_{dolomite} (k_{/acid/} + k_{/neutral/}) * (1 - SR_{dolomite})
where:
k_{/acid/} = 10^{-3.19} e^{-36100 / R} \cdot act(H^{+})^{0.5}
k_{/neutral/} = 10^{-7.53} e^{-52200 / R}
R (8.314462 J K^{-1} mol^{-1}) is the gas constant.
Similarly, the kinetic law for calcite reads:
k_{/acid/} = 10^{-0.3} e^{-14400 / R} \cdot act(H^{+})^{0.5}
k_{/neutral/} = 10^{-5.81} e^{-23500 / R}
The kinetic laws as implemented in the =phreeqc_kin.dat= file accept
one parameter which represents reactive surface area in m^{2}. For the
benchmarks the surface areas are set to
- S_{dolomite}: 0.005 m^{2}
- S_{calcite}: 0.05 m^{2}
The initial content of calcite in the domain is of 0.0002 mol per kg
of water. A constant partial pressure of 10^{-1675} atm of O_{2(g)} is
maintained at any time in the domain in order to fix the redox
potential of the solution to an oxidizing state (pe around 9).
Note that Cl is unreactive in this system and only affects the
computation of the activities in solution.
* POET simulations
Several benchmarks based on the same chemical system are defined here
with different grid sizes, resolution and boundary conditions. The
transported elemental concentrations are 7: C(4), Ca, Cl, Mg and the
implicit total H, total O and Charge as required by PHREEQC_RM.
** =dolo_diffu_inner.R=
- Grid discretization: square domain of 1 \cdot 1 m^{2} discretized in
100x100 cells
- Boundary conditions: All sides of the domain are closed. *Fixed
concentration* of 0.001 molal of MgCl_{2} is defined in the domain
cell (20, 20) and of 0.002 molal MgCl_{2} at cells (60, 60) and
(80, 80)
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 10 iterations with \Delta t of 200 s
- *DHT* parameters:
| H | O | Charge | C(4) | Ca | Cl | Mg | Calcite | Dolomite |
| 10 | 10 | 3 | 5 | 5 | 5 | 5 | 5 | 5 |
- Hooks: the following hooks are defined:
1. =dht_fill=:
2. =dht_fuzz=:
3. =interp_pre_func=:
4. =interp_post_func=:
** =dolo_interp_long.R=
- Grid discretization: rectangular domain of 5 \cdot 2.5 m^{2}
discretized in 400 \times 200 cells
- Boundary conditions: *Fixed concentrations* equal to the initial
state are imposed at all four sides of the domain. *Fixed
concentration* of 0.001 molal of MgCl_{2} is defined in the domain
center at cell (100, 50)
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 20000 iterations with \Delta t of 200 s
- *DHT* parameters:
| H | O | Charge | C(4) | Ca | Cl | Mg | Calcite | Dolomite |
| 10 | 10 | 3 | 5 | 5 | 5 | 5 | 5 | 5 |
- Hooks: the following hooks are defined:
1. =dht_fill=:
2. =dht_fuzz=:
3. =interp_pre_func=:
4. =interp_post_func=:
* References
- De Lucia, Kühn, Lindemann, Lübke, Schnor: POET (v0.1): speedup of
many-core parallel reactive transport simulations with fast DHT
lookups, Geosci. Model Dev., 14, 73917409, 2021.
https://doi.org/10.5194/gmd-14-7391-2021
- Möller, Marco De Lucia: The impact of Mg^{2+} ions on equilibration
of Mg-Ca carbonates in groundwater and brines, Geochemistry
80, 2020. https://doi.org/10.1016/j.chemer.2020.125611
- Palandri, Kharaka: A Compilation of Rate Parameters of Water-Mineral
Interaction Kinetics for Application to Geochemical Modeling, Report
2004-1068, USGS, 2004.

View File

@ -1,35 +1,43 @@
SELECTED_OUTPUT
-high_precision true
-reset false
-time
-soln
-temperature true
-water true
-pH
-pe
-totals C Ca Cl Mg
-kinetic_reactants Calcite Dolomite
-equilibrium O2g
SOLUTION 1
units mol/kgw
temp 25.0
water 1
pH 9.91 charge
pe 4.0
C 1.2279E-04
Ca 1.2279E-04
Cl 1E-12
Mg 1E-12
units mol/kgw
water 1
temperature 25
pH 7
pe 4
PURE 1
O2g -0.1675 10
KINETICS 1
Calcite
-m 0.000207
-parms 0.0032
-tol 1e-10
Dolomite
-m 0.0
-parms 0.00032
-tol 1e-10
Calcite 0.0 1
END
RUN_CELLS
-cells 1
COPY solution 1 2
PURE 2
O2g -0.1675 10
KINETICS 2
Calcite
-m 0.000207
-parms 0.05
-tol 1e-10
Dolomite
-m 0.0
-parms 0.005
-tol 1e-10
END
SOLUTION 3
units mol/kgw
water 1
temp 25
Mg 0.001
Cl 0.002
END
SOLUTION 4
units mol/kgw
water 1
temp 25
Mg 0.002
Cl 0.004
END

View File

@ -1,190 +0,0 @@
## Time-stamp: "Last modified 2023-08-16 17:04:42 mluebke"
database <- normalizePath("../share/poet/bench/dolo/phreeqc_kin.dat")
input_script <- normalizePath("../share/poet/bench/dolo/dolo_inner.pqi")
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
n <- 100
m <- 100
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = -5.0822e-19,
"C(4)" = 1.2279E-4,
"Ca" = 1.2279E-4,
"Cl" = 0,
"Mg" = 0,
"O2g" = 0.499957,
"Calcite" = 2.07e-4,
"Dolomite" = 0
)
grid <- list(
n_cells = c(n, m),
s_cells = c(1, 1),
type = types[1]
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
## initial conditions
init_diffu <- list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = -5.0822e-19,
"C(4)" = 1.2279E-4,
"Ca" = 1.2279E-4,
"Cl" = 0,
"Mg" = 0
)
## diffusion coefficients
alpha_diffu <- c(
"H" = 1E-6,
"O" = 1E-6,
"Charge" = 1E-6,
"C(4)" = 1E-6,
"Ca" = 1E-6,
"Cl" = 1E-6,
"Mg" = 1E-6
)
## list of boundary conditions/inner nodes
vecinj_diffu <- list(
list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = 1.90431e-16,
"C(4)" = 0,
"Ca" = 0,
"Cl" = 0.002,
"Mg" = 0.001
),
list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = 1.90431e-16,
"C(4)" = 0,
"Ca" = 0.0,
"Cl" = 0.004,
"Mg" = 0.002
)
)
vecinj_inner <- list(
l1 = c(1, 20, 20),
l2 = c(2, 80, 80),
l3 = c(2, 60, 80)
)
boundary <- list(
# "N" = c(1, rep(0, n-1)),
"N" = rep(0, n),
"E" = rep(0, n),
"S" = rep(0, n),
"W" = rep(0, n)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- names(init_diffu)
diffusion <- list(
init = as.data.frame(init_diffu, check.names = FALSE),
vecinj = vecinj,
vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
## # Needed when using DHT
dht_species <- c(
"H" = 10,
"O" = 10,
"Charge" = 3,
"C(4)" = 5,
"Ca" = 5,
"Cl" = 5,
"Mg" = 5,
"Calcite" = 5,
"Dolomite" = 5
)
## DHT fill hook: return TRUE when the zero/non-zero status of either
## mineral (Calcite or Dolomite) differs between the cached entry
## (`old`) and the fresh simulation result (`new`), i.e. the entry
## crosses a mineral depletion front and should not be reused as-is.
check_sign_cal_dol_dht <- function(old, new) {
  calcite_flipped <- (old["Calcite"] == 0) != (new["Calcite"] == 0)
  dolomite_flipped <- (old["Dolomite"] == 0) != (new["Dolomite"] == 0)
  calcite_flipped || dolomite_flipped
}
## DHT fuzz hook: restrict (and order) the raw input vector to exactly
## the species tracked in the DHT, using the file-level `dht_species`
## lookup defined above.
fuzz_input_dht_keys <- function(input) {
return(input[names(dht_species)])
}
## Interpolation pre hook: given the query point `to_interp` and the
## candidate DHT entries `data_set` (a list of result vectors), return
## the indices (in reverse order) of candidates whose Calcite/Dolomite
## zero-status pattern is inconsistent with the query, so they can be
## excluded before interpolating across a mineral depletion front.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## Stack the candidate vectors into rows of a data frame, labelled
## with the DHT species names.
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## TRUE where a candidate agrees with the query on "mineral == 0".
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): a candidate is kept when the two agreement flags
## EQUAL each other (both agree or both disagree) — confirm this is
## intended rather than requiring both flags to be TRUE.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post hook: reject an interpolated result when it
## contains a (non-physical) negative mineral amount.
## BUG FIX: the original wrote `(result["Calcite"] <- 0)`, which is an
## ASSIGNMENT (clobbering the Calcite value and evaluating to 0, i.e.
## FALSE in `||`), so negative Calcite was never detected. The intended
## operator is the comparison `<`.
check_neg_cal_dol <- function(result) {
  neg_sign <- (result["Calcite"] < 0) || (result["Dolomite"] < 0)
  return(any(neg_sign))
}
hooks <- list(
dht_fill = check_sign_cal_dol_dht,
dht_fuzz = fuzz_input_dht_keys,
interp_pre_func = check_sign_cal_dol_interp,
interp_post_func = check_neg_cal_dol
)
chemistry <- list(
database = database,
input_script = input_script,
dht_species = dht_species,
hooks = hooks
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 10
dt <- 200
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE
)

View File

@ -1,190 +0,0 @@
## Time-stamp: "Last modified 2023-08-16 17:05:04 mluebke"
database <- normalizePath("../share/poet/bench/dolo/phreeqc_kin.dat")
input_script <- normalizePath("../share/poet/bench/dolo/dolo_inner.pqi")
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
n <- 2000
m <- 1000
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = -5.0822e-19,
"C" = 1.2279E-4,
"Ca" = 1.2279E-4,
"Cl" = 0,
"Mg" = 0,
"O2g" = 0.499957,
"Calcite" = 2.07e-4,
"Dolomite" = 0
)
grid <- list(
n_cells = c(n, m),
s_cells = c(2, 1),
type = types[1]
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
## initial conditions
init_diffu <- list(
"H" = 0.000211313883539788,
"O" = 0.00398302904424952,
"Charge" = -5.0822e-19,
"C(4)" = 1.2279E-4,
"Ca" = 1.2279E-4,
"Cl" = 0,
"Mg" = 0
)
## diffusion coefficients
alpha_diffu <- c(
"H" = 1E-6,
"O" = 1E-6,
"Charge" = 1E-6,
"C(4)" = 1E-6,
"Ca" = 1E-6,
"Cl" = 1E-6,
"Mg" = 1E-6
)
## list of boundary conditions/inner nodes
vecinj_diffu <- list(
list(
"H" = 0.0001540445,
"O" = 0.002148006,
"Charge" = 1.90431e-16,
"C(4)" = 0,
"Ca" = 0,
"Cl" = 0.002,
"Mg" = 0.001
),
list(
"H" = 0.0001610193,
"O" = 0.002386934,
"Charge" = 1.90431e-16,
"C(4)" = 0,
"Ca" = 0.0,
"Cl" = 0.004,
"Mg" = 0.002
)
)
vecinj_inner <- list(
l1 = c(1, 400, 200),
l2 = c(2, 1400, 800),
l3 = c(2, 1600, 800)
)
boundary <- list(
# "N" = c(1, rep(0, n-1)),
"N" = rep(0, n),
"E" = rep(0, m),
"S" = rep(0, n),
"W" = rep(0, m)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- names(init_diffu)
diffusion <- list(
init = as.data.frame(init_diffu, check.names = FALSE),
vecinj = vecinj,
vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
## # Needed when using DHT
dht_species <- c(
"H" = 10,
"O" = 10,
"Charge" = 3,
"C(4)" = 5,
"Ca" = 5,
"Cl" = 5,
"Mg" = 5,
"Calcite" = 5,
"Dolomite" = 5
)
## DHT fill hook: force a PHREEQC re-run when either mineral switched
## between zero and non-zero relative to the cached entry.
check_sign_cal_dol_dht <- function(old, new) {
  for (mineral in c("Calcite", "Dolomite")) {
    if ((old[mineral] == 0) != (new[mineral] == 0)) {
      return(TRUE)
    }
  }
  return(FALSE)
}
## DHT fuzz hook: project the input onto the species tracked in the
## file-scope `dht_species` table (names give the selection order).
fuzz_input_dht_keys <- function(input) {
  wanted <- names(dht_species)
  return(input[wanted])
}
## Interpolation pre-hook. Returns reversed indices of support points in
## `data_set` whose Calcite/Dolomite zero-state disagrees with the query
## point `to_interp`; the caller removes them before interpolating.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## one row per support point, columns named after the key species
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## per-row agreement of each mineral's zero-state with the query point
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): compares `cal` against `dol` rather than requiring both
## to be TRUE -- presumably `cal & dol` was intended; confirm.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post-hook: TRUE when interpolation produced a negative
## (non-physical) Calcite or Dolomite amount.
check_neg_cal_dol <- function(result) {
  ## BUG FIX: `<-` (assignment) instead of `<` (comparison) meant a
  ## negative Calcite value could never trigger this hook; the fixed
  ## expression mirrors the corrected copies used by the fgcs scripts.
  neg_sign <- (result["Calcite"] < 0) || (result["Dolomite"] < 0)
  return(neg_sign)
}
hooks <- list(
dht_fill = check_sign_cal_dol_dht,
dht_fuzz = fuzz_input_dht_keys,
interp_pre_func = check_sign_cal_dol_interp,
interp_post_func = check_neg_cal_dol
)
chemistry <- list(
database = database,
input_script = input_script,
dht_species = dht_species,
hooks = hooks
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 500
dt <- 50
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = seq(5, iterations, by = 5)
)

48
bench/dolo/dolo_fgcs.pqi Normal file
View File

@ -0,0 +1,48 @@
SOLUTION 1
units mol/kgw
water 1
temperature 25
pH 7
pe 4
PURE 1
Calcite 0.0 1
END
RUN_CELLS
-cells 1
END
COPY solution 1 2
#PURE 2
# O2g -0.1675 10
KINETICS 2
Calcite
-m 0.00207
-parms 0.05
-tol 1e-10
Dolomite
-m 0.0
-parms 0.01
-tol 1e-10
END
SOLUTION 3
units mol/kgw
water 1
temp 25
Mg 0.001
Cl 0.002
END
SOLUTION 4
units mol/kgw
water 1
temp 25
Mg 0.002
Cl 0.004
END
RUN_CELLS
-cells 2-4
END

116
bench/dolo/dolo_fgcs_3.R Normal file
View File

@ -0,0 +1,116 @@
rows <- 400
cols <- 400
grid_def <- matrix(2, nrow = rows, ncol = cols)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./dolo_fgcs.pqi",
pqc_db_file = "./phreeqc_kin.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(5, 5), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
bound_def_we <- list(
"type" = rep("constant", rows),
"sol_id" = rep(1, rows),
"cell" = seq(1, rows)
)
bound_def_ns <- list(
"type" = rep("constant", cols),
"sol_id" = rep(1, cols),
"cell" = seq(1, cols)
)
diffusion_setup <- list(
boundaries = list(
"W" = bound_def_we,
"E" = bound_def_we,
"N" = bound_def_ns,
"S" = bound_def_ns
),
inner_boundaries = list(
"row" = floor(rows / 2),
"col" = floor(cols / 2),
"sol_id" = c(3)
),
alpha_x = 1e-6,
alpha_y = 1e-6
)
## DHT fill hook: TRUE when Calcite or Dolomite changed between zero and
## non-zero relative to the cached entry (old vs new result).
check_sign_cal_dol_dht <- function(old, new) {
  cal_flipped <- (old["Calcite"] == 0) != (new["Calcite"] == 0)
  dol_flipped <- (old["Dolomite"] == 0) != (new["Dolomite"] == 0)
  return(cal_flipped || dol_flipped)
}
## Interpolation pre-hook. Returns reversed indices of support points
## whose Calcite/Dolomite zero-state disagrees with the query point
## `to_interp`, so the caller can drop them back-to-front.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## local copy of the key-species table; only its names are used below
dht_species <- c(
"H" = 3,
"O" = 3,
"C" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
)
## one row per support point, columns named after the key species
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## agreement of each mineral's zero-state with the query point
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): compares `cal` against `dol` instead of requiring both
## TRUE -- presumably `cal & dol` was intended; confirm.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post-hook: TRUE when interpolation yielded a negative
## (non-physical) amount of Calcite or Dolomite.
check_neg_cal_dol <- function(result) {
  return((result["Calcite"] < 0) || (result["Dolomite"] < 0))
}
# Optional when using Interpolation (example with less key species and custom
# significant digits)
pht_species <- c(
"C" = 3,
"Ca" = 3,
"Mg" = 3,
"Cl" = 3,
"Calcite" = 3,
"Dolomite" = 3
)
dht_species <- c(
"H" = 3,
"O" = 3,
"C" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4)
chemistry_setup <- list(
dht_species = dht_species,
pht_species = pht_species,
hooks = list(
dht_fill = check_sign_cal_dol_dht,
interp_pre = check_sign_cal_dol_interp,
interp_post = check_neg_cal_dol
)
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

View File

@ -1,28 +0,0 @@
SELECTED_OUTPUT
-high_precision true
-reset false
-kinetic_reactants Calcite Dolomite
-equilibrium O2g
SOLUTION 1
units mol/kgw
temp 25.0
water 1
pH 9.91 charge
pe 4.0
C 1.2279E-04
Ca 1.2279E-04
Cl 1E-12
Mg 1E-12
PURE 1
O2g -0.1675 10
KINETICS 1
Calcite
-m 0.00020
-parms 0.05
-tol 1e-10
Dolomite
-m 0.0
-parms 0.005
-tol 1e-10
END

BIN
bench/dolo/dolo_inner.rds Normal file

Binary file not shown.

View File

@ -0,0 +1,115 @@
rows <- 2000
cols <- 1000
grid_def <- matrix(2, nrow = rows, ncol = cols)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./dol.pqi",
pqc_db_file = "./phreeqc_kin.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(cols, rows) / 100, # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
bound_size <- 2
diffusion_setup <- list(
inner_boundaries = list(
"row" = c(400, 1400, 1600),
"col" = c(200, 800, 800),
"sol_id" = c(3, 4, 4)
),
alpha_x = 1e-6,
alpha_y = 1e-6
)
## DHT fill hook: re-run PHREEQC whenever a mineral's amount crossed
## between zero and non-zero versus the cached result.
check_sign_cal_dol_dht <- function(old, new) {
  for (mineral in c("Calcite", "Dolomite")) {
    if ((old[mineral] == 0) != (new[mineral] == 0)) {
      return(TRUE)
    }
  }
  return(FALSE)
}
## DHT fuzz hook: restrict/reorder the input vector to the DHT key
## species. The significant-digit values in the local table are unused
## here -- only the names define the selection and its order.
fuzz_input_dht_keys <- function(input) {
  key_digits <- c(
    "H" = 3,
    "O" = 3,
    "Charge" = 3,
    "C(4)" = 6,
    "Ca" = 6,
    "Cl" = 3,
    "Mg" = 5,
    "Calcite" = 4,
    "Dolomite" = 4
  )
  return(input[names(key_digits)])
}
## Interpolation pre-hook. Returns reversed indices of support points in
## `data_set` whose Calcite/Dolomite zero-state disagrees with the query
## point `to_interp`; the caller removes them before interpolating.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## local copy of the key-species table; only its names are used below
dht_species <- c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"C(4)" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
)
## one row per support point, columns named after the key species
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## agreement of each mineral's zero-state with the query point
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): compares `cal` against `dol` instead of requiring both
## TRUE -- presumably `cal & dol` was intended; confirm.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post-hook: flag results with a negative (non-physical)
## Calcite or Dolomite amount so they are recomputed by PHREEQC.
check_neg_cal_dol <- function(result) {
  return((result["Calcite"] < 0) || (result["Dolomite"] < 0))
}
# Optional when using Interpolation (example with less key species and custom
# significant digits)
pht_species <- c(
"C(4)" = 3,
"Ca" = 3,
"Mg" = 2,
"Calcite" = 2,
"Dolomite" = 2
)
chemistry_setup <- list(
dht_species = c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"C(4)" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
),
pht_species = pht_species,
hooks = list(
dht_fill = check_sign_cal_dol_dht,
dht_fuzz = fuzz_input_dht_keys,
interp_pre = check_sign_cal_dol_interp,
interp_post = check_neg_cal_dol
)
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

View File

@ -0,0 +1,10 @@
iterations <- 500
dt <- 50
out_save <- seq(5, iterations, by = 5)
list(
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = out_save
)

131
bench/dolo/dolo_interp.R Normal file
View File

@ -0,0 +1,131 @@
rows <- 400
cols <- 200
grid_def <- matrix(2, nrow = rows, ncol = cols)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./dol.pqi",
pqc_db_file = "./phreeqc_kin.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(2.5, 5), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
bound_def_we <- list(
"type" = rep("constant", rows),
"sol_id" = rep(1, rows),
"cell" = seq(1, rows)
)
bound_def_ns <- list(
"type" = rep("constant", cols),
"sol_id" = rep(1, cols),
"cell" = seq(1, cols)
)
diffusion_setup <- list(
boundaries = list(
"W" = bound_def_we,
"E" = bound_def_we,
"N" = bound_def_ns,
"S" = bound_def_ns
),
inner_boundaries = list(
"row" = floor(rows / 2),
"col" = floor(cols / 2),
"sol_id" = c(3)
),
alpha_x = 1e-6,
alpha_y = 1e-6
)
## DHT fill hook (disabled): the Calcite/Dolomite zero-state checks are
## commented out, so this always returns FALSE and never forces a
## PHREEQC re-run from the DHT side. Presumably intentional for this
## interpolation benchmark -- confirm before re-enabling.
check_sign_cal_dol_dht <- function(old, new) {
# if ((old["Calcite"] == 0) != (new["Calcite"] == 0)) {
# return(TRUE)
# }
# if ((old["Dolomite"] == 0) != (new["Dolomite"] == 0)) {
# return(TRUE)
# }
return(FALSE)
}
# fuzz_input_dht_keys <- function(input) {
# dht_species <- c(
# "H" = 3,
# "O" = 3,
# "Charge" = 3,
# "C" = 6,
# "Ca" = 6,
# "Cl" = 3,
# "Mg" = 5,
# "Calcite" = 4,
# "Dolomite" = 4
# )
# return(input[names(dht_species)])
# }
## Interpolation pre-hook. Returns reversed indices of support points
## whose Calcite/Dolomite zero-state disagrees with the query point
## `to_interp`, so the caller can drop them back-to-front.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## local copy of the key-species table; only its names are used below
dht_species <- c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"C" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
)
## one row per support point, columns named after the key species
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## agreement of each mineral's zero-state with the query point
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): compares `cal` against `dol` instead of requiring both
## TRUE -- presumably `cal & dol` was intended; confirm.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post-hook: TRUE when the interpolated result has a
## negative (non-physical) Calcite or Dolomite amount.
check_neg_cal_dol <- function(result) {
  return((result["Calcite"] < 0) || (result["Dolomite"] < 0))
}
# Optional when using Interpolation (example with less key species and custom
# significant digits)
pht_species <- c(
"C" = 3,
"Ca" = 3,
"Mg" = 2,
"Calcite" = 2,
"Dolomite" = 2
)
chemistry_setup <- list(
dht_species = c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"C" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
),
pht_species = pht_species,
hooks = list(
dht_fill = check_sign_cal_dol_dht,
# dht_fuzz = fuzz_input_dht_keys,
interp_pre = check_sign_cal_dol_interp,
interp_post = check_neg_cal_dol
)
)
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

View File

@ -1,204 +0,0 @@
## Time-stamp: "Last modified 2023-08-16 14:57:25 mluebke"
database <- normalizePath("../share/poet/bench/dolo/phreeqc_kin.dat")
input_script <- normalizePath("../share/poet/bench/dolo/dolo_inner.pqi")
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
n <- 400
m <- 200
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = -5.0822e-19,
"C" = 1.2279E-4,
"Ca" = 1.2279E-4,
"Cl" = 0,
"Mg" = 0,
"O2g" = 0.499957,
"Calcite" = 2.07e-4,
"Dolomite" = 0
)
grid <- list(
n_cells = c(n, m),
s_cells = c(5, 2.5),
type = types[1]
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
## initial conditions
init_diffu <- list(
"H" = 1.110124E+02,
"O" = 5.550833E+01,
"Charge" = -1.216307659761E-09,
"C(4)" = 1.230067028174E-04,
"Ca" = 1.230067028174E-04,
"Cl" = 0,
"Mg" = 0
)
## diffusion coefficients
alpha_diffu <- c(
"H" = 1E-6,
"O" = 1E-6,
"Charge" = 1E-6,
"C(4)" = 1E-6,
"Ca" = 1E-6,
"Cl" = 1E-6,
"Mg" = 1E-6
)
## list of boundary conditions/inner nodes
vecinj_diffu <- list(
list(
"H" = 1.110124E+02,
"O" = 5.550796E+01,
"Charge" = -3.230390327801E-08,
"C(4)" = 0,
"Ca" = 0,
"Cl" = 0.002,
"Mg" = 0.001
),
list(
"H" = 110.683,
"O" = 55.3413,
"Charge" = 1.90431e-16,
"C(4)" = 0,
"Ca" = 0.0,
"Cl" = 0.004,
"Mg" = 0.002
),
init_diffu
)
vecinj_inner <- list(
l1 = c(1, floor(n / 2), floor(m / 2))
# l2 = c(2,1400,800),
# l3 = c(2,1600,800)
)
boundary <- list(
# "N" = c(1, rep(0, n-1)),
"N" = rep(3, n),
"E" = rep(3, m),
"S" = rep(3, n),
"W" = rep(3, m)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- names(init_diffu)
diffusion <- list(
init = as.data.frame(init_diffu, check.names = FALSE),
vecinj = vecinj,
vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
## # optional when using DHT
dht_species <- c(
"H" = 3,
"O" = 3,
"Charge" = 3,
"C(4)" = 6,
"Ca" = 6,
"Cl" = 3,
"Mg" = 5,
"Calcite" = 4,
"Dolomite" = 4
)
## # Optional when using Interpolation (example with less key species and custom
## # significant digits)
# pht_species <- c(
# "C(4)" = 3,
# "Ca" = 3,
# "Mg" = 2,
# "Calcite" = 2,
# "Dolomite" = 2
# )
## DHT fill hook: TRUE when the zero/non-zero state of Calcite or
## Dolomite differs between cached (`old`) and computed (`new`) results.
check_sign_cal_dol_dht <- function(old, new) {
  cal_flipped <- (old["Calcite"] == 0) != (new["Calcite"] == 0)
  dol_flipped <- (old["Dolomite"] == 0) != (new["Dolomite"] == 0)
  return(cal_flipped || dol_flipped)
}
## DHT fuzz hook: subset/reorder the input to the key species named in
## the file-scope `dht_species` table.
fuzz_input_dht_keys <- function(input) {
  key_species <- names(dht_species)
  return(input[key_species])
}
## Interpolation pre-hook. Returns reversed indices of support points in
## `data_set` whose Calcite/Dolomite zero-state disagrees with the query
## point `to_interp`; the caller removes them before interpolating.
check_sign_cal_dol_interp <- function(to_interp, data_set) {
## one row per support point, columns named after the key species
data_set <- as.data.frame(do.call(rbind, data_set), check.names = FALSE, optional = TRUE)
names(data_set) <- names(dht_species)
## agreement of each mineral's zero-state with the query point
cal <- (data_set$Calcite == 0) == (to_interp["Calcite"] == 0)
dol <- (data_set$Dolomite == 0) == (to_interp["Dolomite"] == 0)
## NOTE(review): compares `cal` against `dol` instead of requiring both
## TRUE -- presumably `cal & dol` was intended; confirm.
cal_dol_same_sig <- cal == dol
return(rev(which(!cal_dol_same_sig)))
}
## Interpolation post-hook: TRUE when interpolation produced a negative
## (non-physical) Calcite or Dolomite amount.
check_neg_cal_dol <- function(result) {
  ## BUG FIX: `<-` (assignment) was used instead of `<` (comparison), so
  ## a negative Calcite amount never triggered this hook. Fixed form
  ## matches the corrected copies in the fgcs benchmark scripts; `||`
  ## already yields a scalar, making the former `any()` redundant.
  neg_sign <- (result["Calcite"] < 0) || (result["Dolomite"] < 0)
  return(neg_sign)
}
hooks <- list(
dht_fill = check_sign_cal_dol_dht,
dht_fuzz = fuzz_input_dht_keys,
interp_pre_func = check_sign_cal_dol_interp,
interp_post_func = check_neg_cal_dol
)
chemistry <- list(
database = database,
input_script = input_script,
dht_species = dht_species,
hooks = hooks
# pht_species = pht_species
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 20000
dt <- 200
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = c(1, seq(50, iterations, by = 50))
)

View File

@ -0,0 +1,10 @@
iterations <- 20000
dt <- 200
out_save <- seq(50, iterations, by = 50)
list(
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = out_save
)

Binary file not shown.

View File

@ -0,0 +1,102 @@
% Created 2024-12-11 mer 23:24
% Intended LaTeX compiler: pdflatex
\documentclass[a4paper, 9pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\usepackage{fullpage}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{charter}
\usepackage{listings}
\lstloadlanguages{R}
\author{MDL <delucia@gfz.de>}
\date{2024-12-11}
\title{A \texttt{barite}-based benchmark for FGCS interpolation paper}
\begin{document}
\maketitle
\section{Description}
\label{sec:org739879a}
\begin{itemize}
\item \texttt{barite\_fgcs\_2.R}: POET input script with circular
"crystals" on a 200x200 nodes grid
\item \(\alpha\): isotropic 10\textsuperscript{-5}
m\textsuperscript{2}/s outside of the crystals,
10\textsuperscript{-7} inside
\item 200 iterations, dt = 1000
\item \texttt{barite\_fgcs\_2.pqi}: PHREEQC input, 4 SOLUTIONS
(basically the same as in \texttt{barite} benchmark):
\begin{enumerate}
\item Equilibrium with Celestite, no mineral \(\Rightarrow\)
\item Equilibrium with Celestite, KINETICS Celestite (1 mol) and
Barite (0 mol)
\item Injection of 0.1 BaCl2 from NW corner
\item Injection of 0.2 BaCl2 from SE corner
\end{enumerate}
\item \texttt{db\_barite.dat}: PHREEQC database containing the kinetic
expressions for barite and celestite, stripped down from
\texttt{phreeqc.dat}
\end{itemize}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.48\textwidth]{./fgcs_Celestite_init.pdf}
\includegraphics[width=0.48\textwidth]{./fgcs_Barite_200.pdf}
\caption{\textbf{Left:} Initial distribution of Celestite
"crystals". \textbf{Right:} precipitated Barite}
\end{figure}
\section{Interpolation}
\label{sec:org2a09431}
Using the following parametrization:
\begin{lstlisting}
dht_species <- c("H" = 7,
"O" = 7,
"Ba" = 7,
"Cl" = 7,
"S(6)" = 7,
"Sr" = 7,
"Barite" = 4,
"Celestite" = 4)
pht_species <- c("Ba" = 4,
"Cl" = 3,
"S(6)" = 3,
"Sr" = 3,
"Barite" = 2,
"Celestite" = 2 )
\end{lstlisting}
Runtime goes from 1800 to 600 s (21 CPUs) but there are "suspect"
errors especially in O and H, where "suspect" means some values appear
to be multiplied by 2:
\begin{figure}[htbp]
\centering
\includegraphics[width=0.9\textwidth]{./fgcs_interp_1.pdf}
\caption{Scatterplots reference vs interpolated after 1 coupling
iteration}
\end{figure}
\end{document}
%%% Local Variables:
%%% mode: LaTeX
%%% TeX-master: t
%%% End:

90
bench/fgcs/EvalFGCS.R Normal file
View File

@ -0,0 +1,90 @@
## Time-stamp: "Last modified 2024-12-11 23:21:25 delucia"
library(PoetUtils)
library(viridis)
res <- ReadPOETSims("./res_fgcs2_96/")
pp <- PlotField(res$iter_200$C$Barite, rows = 200, cols = 200, contour = FALSE,
nlevels=12, palette=terrain.colors)
cairo_pdf("fgcs_Celestite_init.pdf", family="serif")
par(mar=c(0,0,0,0))
pp <- PlotField((res$iter_000$Celestite), rows = 200, cols = 200,
contour = FALSE, breaks=c(-0.5,0.5,1.5),
palette = grey.colors, plot.axes = FALSE, scale = FALSE,
main="Initial Celestite crystals")
dev.off()
cairo_pdf("fgcs_Ba_init.pdf", family="serif")
par(mar=c(0,0,0,0))
pp <- PlotField(log10(res$iter_001$C$Cl), rows = 200, cols = 200,
contour = FALSE,
palette = terrain.colors, plot.axes = FALSE, scale = FALSE,
main="log10(Ba)")
dev.off()
pp <- PlotField(log10(res$iter_002$C$Ba), rows = 200, cols = 200,
contour = FALSE, palette = viridis, rev.palette = FALSE,
main = "log10(Ba) after 5 iterations")
pp <- PlotField(log10(res$iter_200$C$`S(6)`), rows = 200, cols = 200, contour = FALSE)
str(res$iter_00)
res$iter_178$C$Barite
pp <- res$iter_043$C$Barite
breaks <- pretty(pp, n = 5)
br <- c(0, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1)
pp <- PlotField(res$iter_200$C$Barite, rows = 200, cols = 200, contour = FALSE,
breaks = br, palette=terrain.colors)
cairo_pdf("fgcs_Barite_200.pdf", family="serif")
pp <- PlotField(log10(res$iter_200$C$Barite), rows = 200, cols = 200,
contour = FALSE, palette = terrain.colors, plot.axes = FALSE,
rev.palette = FALSE, main = "log10(Barite) after 200 iter")
dev.off()
ref <- ReadPOETSims("./res_fgcs_2_ref")
rei <- ReadPOETSims("./res_fgcs_2_interp1/")
timref <- ReadRObj("./res_fgcs_2_ref/timings.qs")
timint <- ReadRObj("./res_fgcs_2_interp1/timings.qs")
timref
timint
wch <- c("H","O", "Ba", "Sr","Cl", "S(6)")
rf <- data.matrix(ref$iter_001$C[, wch])
r1 <- data.matrix(rei$iter_001$C[, wch])
r1[is.nan(r1)] <- NA
rf[is.nan(rf)] <- NA
cairo_pdf("fgcs_interp_1.pdf", family="serif", width = 10, height = 7)
PlotScatter(rf, r1, which = wch, labs = c("ref", "interp"), cols = 3, log="", las = 1, pch=4)
dev.off()
head(r1)
head(rf)
rf$O
r1$O

2
bench/fgcs/README.org Normal file
View File

@ -0,0 +1,2 @@
* Refer to the LaTeX file (and pdf) for more information

105
bench/fgcs/barite_fgcs_2.R Normal file
View File

@ -0,0 +1,105 @@
## Time-stamp: "Last modified 2024-12-11 16:08:11 delucia"
cols <- 1000
rows <- 1000
dim_cols <- 50
dim_rows <- 50
ncirc <- 20 ## number of crystals
rmax <- cols / 10 ## max radius (in nodes)
set.seed(22933)
centers <- cbind(sample(seq_len(cols), ncirc), sample(seq_len(rows), ncirc))
radii <- sample(seq_len(rmax), ncirc, replace = TRUE)
mi <- matrix(rep(seq_len(cols), rows), byrow = TRUE, nrow = rows)
mj <- matrix(rep(seq_len(cols), each = rows), byrow = TRUE, nrow = rows)
tmpl <- lapply(seq_len(ncirc), function(x) which((mi - centers[x, 1])^2 + (mj - centers[x, 2])^2 < radii[x]^2, arr.ind = TRUE))
inds <- do.call(rbind, tmpl)
grid <- matrix(1, nrow = rows, ncol = cols)
grid[inds] <- 2
alpha <- matrix(1e-5, ncol = cols, nrow = rows)
alpha[inds] <- 1e-7
## image(grid, asp=1)
## Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./barite_fgcs_2.pqi",
pqc_db_file = "../barite/db_barite.dat", ## database file
grid_def = grid, ## grid definition, IDs according to the Phreeqc input
grid_size = c(dim_cols, dim_rows), ## grid size in meters
constant_cells = c() ## IDs of cells with constant concentration
)
bound_length <- cols / 10
bound_N <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(3, bound_length),
"cell" = seq(1, bound_length)
)
bound_W <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(3, bound_length),
"cell" = seq(1, bound_length)
)
bound_E <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(4, bound_length),
"cell" = seq(rows - bound_length + 1, rows)
)
bound_S <- list(
"type" = rep("constant", bound_length),
"sol_id" = rep(4, bound_length),
"cell" = seq(cols - bound_length + 1, cols)
)
diffusion_setup <- list(
boundaries = list(
"W" = bound_W,
"N" = bound_N,
"E" = bound_E,
"S" = bound_S
),
alpha_x = alpha,
alpha_y = alpha
)
dht_species <- c(
"H" = 7,
"O" = 7,
"Ba" = 7,
"Cl" = 7,
"S" = 7,
"Sr" = 7,
"Barite" = 4,
"Celestite" = 4
)
pht_species <- c(
"Ba" = 4,
"Cl" = 3,
"S" = 3,
"Sr" = 3,
"Barite" = 0,
"Celestite" = 0
)
chemistry_setup <- list(
dht_species = dht_species,
pht_species = pht_species
)
## Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, ## Parameters related to the grid structure
Diffusion = diffusion_setup, ## Parameters related to the diffusion process
Chemistry = chemistry_setup
)

View File

@ -0,0 +1,49 @@
SOLUTION 1
units mol/kgw
water 1
temperature 25
pH 7.008
pe 10.798
S 6.205e-04
Sr 6.205e-04
END
SOLUTION 2
units mol/kgw
water 1
temperature 25
pH 7.008
pe 10.798
S 6.205e-04
Sr 6.205e-04
KINETICS 2
Barite
-m 0.00
-parms 50. # reactive surface area
-tol 1e-9
Celestite
-m 1
-parms 10.0 # reactive surface area
-tol 1e-9
END
SOLUTION 3
units mol/kgw
water 1
temperature 25
Ba 0.1
Cl 0.2
END
SOLUTION 4
units mol/kgw
water 1
temperature 25
Ba 0.2
Cl 0.4
END
RUN_CELLS
-cells 1 2 3 4
END

View File

@ -0,0 +1,7 @@
iterations <- 200
dt <- 1000
list(
timesteps = rep(dt, iterations),
store_result = TRUE
)

View File

@ -1,9 +1,20 @@
install(FILES
ExBase.pqi
ex.R
surfex.R
SurfExBase.pqi
SMILE_2021_11_01_TH.dat
DESTINATION
share/poet/bench/surfex
set(bench_files
# surfex.R
# ex.R
PoetEGU_surfex_500.R
)
set(runtime_files
# surfex_rt.R
# ex_rt.R
PoetEGU_surfex_500_rt.R
)
ADD_BENCH_TARGET(
surfex_bench
bench_files
runtime_files
"surfex"
)
add_dependencies(${BENCHTARGET} surfex_bench)

View File

@ -37,3 +37,27 @@ EXCHANGE 1
Z 0.0012585
Y 0.0009418
END
SOLUTION 2
temp 13
units mol/kgw
C(-4) 2.92438561098248e-21
C(4) 2.65160558871092e-06
Ca 2.89001071336443e-05
Cl 0.000429291158114428
Fe(2) 1.90823391198114e-07
Fe(3) 3.10832423034763e-12
H(0) 2.7888235127385e-15
K 2.5301787e-06
Mg 2.31391999937907e-05
Na 0.00036746969
S(-2) 1.01376078438546e-14
S(2) 1.42247026981542e-19
S(4) 9.49422092568557e-18
S(6) 2.19812504654191e-05
Sr 6.01218519999999e-07
U(4) 4.82255946569383e-12
U(5) 5.49050615347901e-13
U(6) 1.32462838991902e-09
END

View File

@ -0,0 +1,40 @@
rows <- 500
cols <- 200
grid_left <- matrix(1, nrow = rows, ncol = cols/2)
grid_rght <- matrix(2, nrow = rows, ncol = cols/2)
grid_def <- cbind(grid_left, grid_rght)
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./SurfexEGU.pqi",
pqc_db_file = "./SMILE_2021_11_01_TH.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(10, 4), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
bound_def <- list(
"type" = rep("constant", cols),
"sol_id" = rep(3, cols),
"cell" = seq(1, cols)
)
diffusion_setup <- list(
boundaries = list(
"N" = bound_def
),
alpha_x = matrix(runif(rows*cols))*1e-8,
alpha_y = matrix(runif(rows*cols))*1e-9## ,1e-10
)
chemistry_setup <- list()
# Define a setup list for simulation configuration
setup <- list(
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

View File

@ -0,0 +1,11 @@
iterations <- 200
dt <- 1000
out_save <- c(1, 2, seq(5, iterations, by=5))
## out_save <- seq(1, iterations)
list(
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = out_save
)

100
bench/surfex/README.org Normal file
View File

@ -0,0 +1,100 @@
#+TITLE: Description of =surfex= benchmark
#+AUTHOR: MDL <delucia@gfz-potsdam.de>
#+DATE: 2023-08-26
#+STARTUP: inlineimages
#+LATEX_CLASS_OPTIONS: [a4paper,9pt]
#+LATEX_HEADER: \usepackage{fullpage}
#+LATEX_HEADER: \usepackage{amsmath, systeme}
#+LATEX_HEADER: \usepackage{graphicx}
#+LATEX_HEADER: \usepackage{charter}
#+OPTIONS: toc:nil
* Quick start
#+begin_src sh :language sh :frame single
mpirun -np 4 ./poet ex.R ex_res
mpirun -np 4 ./poet surfex.R surfex_res
#+end_src
* List of Files
- =ex.R=: POET input script for a 100x100 simulation grid, only
exchange
- =ExBase.pqi=: PHREEQC input script for the =ex.R= model
- =surfex.R=: POET input script for a 1000x1000 simulation grid
considering both cation exchange and surface complexation
- =SurfExBase.pqi=: PHREEQC input script for the =surfex.R= model
- =SMILE_2021_11_01_TH.dat=: PHREEQC database containing the
parametrized data for Surface and Exchange, based on the SMILE
Thermodynamic Database (Version 01-November-2021)
* Chemical system
This model describes migration of Uranium radionuclide in Opalinus
clay subject to surface complexation and cation exchange on the
surface of clay minerals. These two processes account for the binding
of aqueous complexes to the surfaces of minerals, which may have a
significant impact on safety of underground nuclear waste repository.
Namely, they can act as retardation buffer for uranium complexes
entering into a natural system. The system is kindly provided by Dr.
T. Hennig and is inspired by the sandy facies BWS-A3 sample from the
Mont Terri underground lab (Hennig and Kühn, 2021).
This chemical system is highly redox-sensitive, and several elements
are defined in significant amounts in different valence states. In
total, 20 elemental concentrations and valences are transported:
C(-4), C(4), Ca, Cl, Fe(2), Fe(3), K, Mg, Na, S(-2), S(2), S(4), S(6),
Sr , U(4), U(5), U(6); plus the total H, total O and Charge implicitly
required by PHREEQC_RM.
** Exchange
The SMILE database defines thermodynamical data for exchange of all
major cations and uranyl-ions on Illite and Montmorillonite. In
PHREEQC terms:
- *Y* for Montmorillonite, with a total amount of 1.2585
milliequivalents and
- *Z* for Illite, with a total amount of 0.9418 meq
** Surface
Here we consider a Donnan diffuse double layer of 0.49 nm. Six
distinct sorption sites are defined:
- Kln_aOH (aluminol site) and Kln_siOH (silanol) for Kaolinite
- For Illite, strong and weak sites Ill_sOH and Ill_wOH respectively
- For Montmorillonite, strong and weak sites Mll_sOH and Mll_wOH
respectively
Refer to the =SurfExBase.pqi= script for the actual numerical values
of the parameters.
* POET simulations
** =ex.R=
This benchmark only considers EXCHANGE, no mineral or SURFACE
complexation is involved.
- Grid discretization: square domain of 1 \cdot 1 m^{2} discretized in
100x100 cells
- Boundary conditions: E, S and W sides of the domain are closed.
*Fixed concentrations* are imposed at the N boundary.
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 10 iterations with \Delta t of 200 s
- *DHT* is not implemented as of yet for models including SURFACE and
EXCHANGE geochemical processes *TODO*
- Hooks: no hooks defined *TODO*
** =surfex.R=
- Grid discretization: rectangular domain of 1 \cdot 1 m^{2}
discretized in 10 \times 10 cells
- Boundary conditions: E, S and W sides of the domain are closed.
*Fixed concentrations* are imposed at the N boundary.
- Diffusion coefficients: isotropic homogeneous \alpha = 1E-06
- Time steps & iterations: 10 iterations with \Delta t of 200 s
* References
- Hennig, T.; Kühn, M.Surrogate Model for Multi-Component Diffusion of
Uranium through Opalinus Clay on the Host Rock Scale. Appl. Sci.
2021, 11, 786. https://doi.org/10.3390/app11020786

View File

@ -54,3 +54,27 @@ EXCHANGE 1
Z 0.0012585
Y 0.0009418
END
SOLUTION 2
temp 13
units mol/kgw
C(-4) 2.92438561098248e-21
C(4) 2.65160558871092e-06
Ca 2.89001071336443e-05
Cl 0.000429291158114428
Fe(2) 1.90823391198114e-07
Fe(3) 3.10832423034763e-12
H(0) 2.7888235127385e-15
K 2.5301787e-06
Mg 2.31391999937907e-05
Na 0.00036746969
S(-2) 1.01376078438546e-14
S(2) 1.42247026981542e-19
S(4) 9.49422092568557e-18
S(6) 2.19812504654191e-05
Sr 6.01218519999999e-07
U(4) 4.82255946569383e-12
U(5) 5.49050615347901e-13
U(6) 1.32462838991902e-09
END

108
bench/surfex/SurfexEGU.pqi Normal file
View File

@ -0,0 +1,108 @@
## Time-stamp: "Last modified 2024-04-12 10:59:59 delucia"
## KNOBS
## -logfile false
## -iterations 10000
## -convergence_tolerance 1E-12
## -step_size 2
## -pe_step_size 2
SOLUTION 1 ## Porewater composition Opalinus Clay, WITHOUT radionuclides, AFTER EQUI_PHASES
pe -2.627 ## Eh = -227 mV, Value from Bossart & Thury (2008)-> PC borehole measurement 2003, Eh still decreasing
density 1.01583 ## kg/dm³ = g/cm³
temp 13 ## mean temperature Mont Terri, Bossart & Thury (2008), calculations performed for 25°C
units mol/kgw
## Mean composition
pH 7.064
Na 2.763e-01
Cl 3.228e-01 charge
S(6) 1.653e-02 as SO4
Ca 2.173e-02
Mg 1.740e-02
K 1.902e-03
Sr 4.520e-04
Fe 1.435e-04
U 2.247e-09
SURFACE 1 Opalinus Clay, clay minerals
## calculated with rho_b=2.2903 kg/dm³, poro=0.1662
## 1 dm³ = 13.565641 kg_sed/kg_pw
-equil 1 ## equilibrate with solution 1
-sites_units density ## set unit for binding site density to sites/nm2
-donnan 4.9e-10 ## calculated after Wigger & Van Loon (2018) for ionic strength after equilibration with minerals for pCO2=2.2 log10 bar
# surface density SSA (m2/g) mass (g/kgw)
Kln_aOH 1.155 11. 3798.4 ## Kaolinite 28 wt% (aluminol and silanol sites)
Kln_siOH 1.155
Ill_sOH 0.05 100. 4205.35 ## Illite 31 wt% (weak and strong binding sites)
Ill_wOH 2.26 ## 2 % strong binding sites
Mll_sOH 0.05 100. 813.94 ## Montmorillonite = smektite = 6 wt% (weak und strong binding sites)
Mll_wOH 2.26 ## 2 % strong binding sites
EXCHANGE 1 Exchanger, only illite+montmorillonite
## Illite = 0.225 eq/kg_rock, Montmorillonit = 0.87 eq/kg_rock
-equil 1 ## equilibrate with solution 1
Z 0.9462 ## = Illite
Y 0.70813 ## = Montmorillonite
END
SOLUTION 2 ## Porewater composition Opalinus Clay, WITHOUT radionuclides, AFTER EQUI_PHASES
pe -2.627 ## Eh = -227 mV, Value from Bossart & Thury (2008)-> PC borehole measurement 2003, Eh still decreasing
density 1.01583 ## kg/dm³ = g/cm³
temp 13 ## mean temperature Mont Terri, Bossart & Thury (2008), calculations performed for 25°C
units mol/kgw
## Mean composition
pH 7.064
Na 2.763e-01
Cl 3.228e-01 charge
S(6) 1.653e-02 as SO4
Ca 2.173e-02
Mg 1.740e-02
K 1.902e-03
Sr 4.520e-04
Fe 1.435e-04
U 2.247e-09
SURFACE 2 Opalinus Clay, clay minerals
-equil 2 ## equilibrate with solution 2
-sites_units density ## set unit for binding site density to
## sites/nm2
-donnan 4.9e-10 ## calculated after Wigger & Van Loon (2018)
## for ionic strength after equilibration
## with minerales for pCO2=2.2 log10 bar
## surface density SSA (m2/g) mass (g/kgw)
Kln_aOH 1.155 11. 2798.4 ## Kaolinite 28 wt% (aluminol and silanol sites)
Kln_siOH 1.155
Ill_sOH 0.05 100. 1205.35 ## Illite 31 wt% (weak und strong binding sites)
Ill_wOH 2.26 ## 2 % strong binding sites
Mll_sOH 0.05 100. 113.94 ## Montmorillonite = smektite = 6 wt% (weak und strong binding sites)
Mll_wOH 2.26 ## 2 % strong binding sites
EXCHANGE 2 Exchanger, only illite+montmorillonite
## Illite = 0.225 eq/kg_rock, Montmorillonit = 0.87 eq/kg_rock
-equil 2 ## equilibrate with solution 1
Z 0.5 ## = Illite
Y 0.2 ## = Montmorillonite
END
SOLUTION 3
pe -2.627 ## Eh = -227 mV, Value from Bossart & Thury (2008)-> PC borehole measurement 2003, Eh still decreasing
density 1.01583 ## kg/dm³ = g/cm³
temp 13 ## mean temperature Mont Terri, Bossart & Thury (2008), calculations performed for 25°C
units mol/kgw
## Mean composition
pH 7.064
Na 3.763e-01
Cl 4.228e-01 charge
S(6) 1.653e-02 as SO4
Ca 2.173e-02
Mg 1.740e-02
K 1.902e-03
Sr 4.520e-04
Fe 1.435e-04
U 1e-6
C 1.991e-03
END
RUN_CELLS
END

View File

@ -1,140 +1,37 @@
## Time-stamp: "Last modified 2023-08-02 13:59:35 mluebke"
rows <- 100
cols <- 100
database <- normalizePath("./SMILE_2021_11_01_TH.dat")
input_script <- normalizePath("./ExBase.pqi")
grid_def <- matrix(1, nrow = rows, ncol = cols)
cat(paste(":: R This is a test 1\n"))
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./SurfExBase.pqi",
pqc_db_file = "./SMILE_2021_11_01_TH.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(1, 1), # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
bound_def <- list(
"type" = rep("constant", cols),
"sol_id" = rep(2, cols),
"cell" = seq(1, cols)
)
n <- 100
m <- 100
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(H = 1.476571028625e-01,
O = 7.392297218936e-02,
Charge = -1.765225732724e-18,
`C(-4)` = 2.477908970828e-21,
`C(4)` = 2.647623016916e-06,
Ca = 2.889623169138e-05,
Cl = 4.292806181039e-04,
`Fe(2)` =1.908142472666e-07,
`Fe(3)` =3.173306589931e-12,
`H(0)` =2.675642675119e-15,
K = 2.530134809667e-06,
Mg =2.313806319294e-05,
Na =3.674633059628e-04,
`S(-2)` = 8.589766637180e-15,
`S(2)` = 1.205284362720e-19,
`S(4)` = 9.108958772790e-18,
`S(6)` = 2.198092329098e-05,
Sr = 6.012080128154e-07,
`U(4)` = 1.039668623852e-14,
`U(5)` = 1.208394829796e-15,
`U(6)` = 2.976409147150e-12)
grid <- list(
n_cells = c(n, m),
s_cells = c(1, 1),
type = "scratch"
diffusion_setup <- list(
boundaries = list(
"N" = bound_def
),
alpha_x = 1e-6,
alpha_y = 1e-6
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
vecinj_diffu <- list(
list(H = 0.147659686316291,
O = 0.0739242798146046,
Charge = 7.46361643222701e-20,
`C(-4)` = 2.92438561098248e-21,
`C(4)` = 2.65160558871092e-06,
Ca = 2.89001071336443e-05,
Cl = 0.000429291158114428,
`Fe(2)` = 1.90823391198114e-07,
`Fe(3)` = 3.10832423034763e-12,
`H(0)` = 2.7888235127385e-15,
K = 2.5301787e-06,
Mg = 2.31391999937907e-05,
Na = 0.00036746969,
`S(-2)` = 1.01376078438546e-14,
`S(2)` = 1.42247026981542e-19,
`S(4)` = 9.49422092568557e-18,
`S(6)` = 2.19812504654191e-05,
Sr = 6.01218519999999e-07,
`U(4)` = 4.82255946569383e-12,
`U(5)` = 5.49050615347901e-13,
`U(6)` = 1.32462838991902e-09)
)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- grid$props
## diffusion coefficients
alpha_diffu <- c(H = 1E-6, O = 1E-6, Charge = 1E-6, `C(-4)` = 1E-6,
`C(4)` = 1E-6, Ca = 1E-6, Cl = 1E-6, `Fe(2)` = 1E-6,
`Fe(3)` = 1E-6, `H(0)` = 1E-6, K = 1E-6, Mg = 1E-6,
Na = 1E-6, `S(-2)` = 1E-6, `S(2)` = 1E-6,
`S(4)` = 1E-6, `S(6)` = 1E-6, Sr = 1E-6,
`U(4)` = 1E-6, `U(5)` = 1E-6, `U(6)` = 1E-6)
## list of boundary conditions/inner nodes
## vecinj_inner <- list(
## list(1,1,1)
## )
boundary <- list(
"N" = rep(1, n),
"E" = rep(0, n),
"S" = rep(0, n),
"W" = rep(0, n)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- names(init_cell)
diffusion <- list(
init = as.data.frame(init_cell, check.names = FALSE),
vecinj = vecinj,
# vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
chemistry <- list(
database = database,
input_script = input_script
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 10
dt <- 200
chemistry_setup <- list()
# Define a setup list for simulation configuration
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE
)
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

7
bench/surfex/ex_rt.R Normal file
View File

@ -0,0 +1,7 @@
iterations <- 10
dt <- 200
list(
timesteps = rep(dt, iterations),
store_result = TRUE
)

View File

@ -1,141 +1,37 @@
## Time-stamp: "Last modified 2023-08-02 13:59:44 mluebke"
rows <- 1000
cols <- 1000
database <- normalizePath("../share/poet/bench/surfex/SMILE_2021_11_01_TH.dat")
input_script <- normalizePath("../share/poet/bench/surfex/SurfExBase.pqi")
grid_def <- matrix(1, nrow = rows, ncol = cols)
cat(paste(":: R This is a test 1\n"))
# Define grid configuration for POET model
grid_setup <- list(
pqc_in_file = "./SurfExBase.pqi",
pqc_db_file = "./SMILE_2021_11_01_TH.dat", # Path to the database file for Phreeqc
grid_def = grid_def, # Definition of the grid, containing IDs according to the Phreeqc input script
grid_size = c(rows, cols) / 10, # Size of the grid in meters
constant_cells = c() # IDs of cells with constant concentration
)
#################################################################
## Section 1 ##
## Grid initialization ##
#################################################################
bound_def <- list(
"type" = rep("constant", cols),
"sol_id" = rep(2, cols),
"cell" = seq(1, cols)
)
n <- 10
m <- 10
types <- c("scratch", "phreeqc", "rds")
init_cell <- list(H = 1.476571028625e-01,
O = 7.392297218936e-02,
Charge = -1.765225732724e-18,
`C(-4)` = 2.477908970828e-21,
`C(4)` = 2.647623016916e-06,
Ca = 2.889623169138e-05,
Cl = 4.292806181039e-04,
`Fe(2)` =1.908142472666e-07,
`Fe(3)` =3.173306589931e-12,
`H(0)` =2.675642675119e-15,
K = 2.530134809667e-06,
Mg =2.313806319294e-05,
Na =3.674633059628e-04,
`S(-2)` = 8.589766637180e-15,
`S(2)` = 1.205284362720e-19,
`S(4)` = 9.108958772790e-18,
`S(6)` = 2.198092329098e-05,
Sr = 6.012080128154e-07,
`U(4)` = 1.039668623852e-14,
`U(5)` = 1.208394829796e-15,
`U(6)` = 2.976409147150e-12)
grid <- list(
n_cells = c(n, m),
s_cells = c(1, 1),
type = "scratch"
diffusion_setup <- list(
boundaries = list(
"N" = bound_def
),
alpha_x = 1e-6,
alpha_y = 1e-6
)
##################################################################
## Section 2 ##
## Diffusion parameters and boundary conditions ##
##################################################################
vecinj_diffu <- list(
list(H = 0.147659686316291,
O = 0.0739242798146046,
Charge = 7.46361643222701e-20,
`C(-4)` = 2.92438561098248e-21,
`C(4)` = 2.65160558871092e-06,
Ca = 2.89001071336443e-05,
Cl = 0.000429291158114428,
`Fe(2)` = 1.90823391198114e-07,
`Fe(3)` = 3.10832423034763e-12,
`H(0)` = 2.7888235127385e-15,
K = 2.5301787e-06,
Mg = 2.31391999937907e-05,
Na = 0.00036746969,
`S(-2)` = 1.01376078438546e-14,
`S(2)` = 1.42247026981542e-19,
`S(4)` = 9.49422092568557e-18,
`S(6)` = 2.19812504654191e-05,
Sr = 6.01218519999999e-07,
`U(4)` = 4.82255946569383e-12,
`U(5)` = 5.49050615347901e-13,
`U(6)` = 1.32462838991902e-09)
)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- grid$props
## diffusion coefficients
alpha_diffu <- c(H = 1E-6, O = 1E-6, Charge = 1E-6, `C(-4)` = 1E-6,
`C(4)` = 1E-6, Ca = 1E-6, Cl = 1E-6, `Fe(2)` = 1E-6,
`Fe(3)` = 1E-6, `H(0)` = 1E-6, K = 1E-6, Mg = 1E-6,
Na = 1E-6, `S(-2)` = 1E-6, `S(2)` = 1E-6,
`S(4)` = 1E-6, `S(6)` = 1E-6, Sr = 1E-6,
`U(4)` = 1E-6, `U(5)` = 1E-6, `U(6)` = 1E-6)
## list of boundary conditions/inner nodes
## vecinj_inner <- list(
## list(1,1,1)
## )
boundary <- list(
"N" = rep(1, n),
"E" = rep(0, n),
"S" = rep(0, n),
"W" = rep(0, n)
)
diffu_list <- names(alpha_diffu)
vecinj <- do.call(rbind.data.frame, vecinj_diffu)
names(vecinj) <- names(init_cell)
diffusion <- list(
init = as.data.frame(init_cell, check.names = FALSE),
vecinj = vecinj,
# vecinj_inner = vecinj_inner,
vecinj_index = boundary,
alpha = alpha_diffu
)
#################################################################
## Section 3 ##
## Chemistry module (Phreeqc) ##
#################################################################
chemistry <- list(
database = database,
input_script = input_script
)
#################################################################
## Section 4 ##
## Putting all those things together ##
#################################################################
iterations <- 10
dt <- 200
chemistry_setup <- list()
# Define a setup list for simulation configuration
setup <- list(
grid = grid,
diffusion = diffusion,
chemistry = chemistry,
iterations = iterations,
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = c(5, iterations)
)
Grid = grid_setup, # Parameters related to the grid structure
Diffusion = diffusion_setup, # Parameters related to the diffusion process
Chemistry = chemistry_setup # Parameters related to the chemistry process
)

10
bench/surfex/surfex_rt.R Normal file
View File

@ -0,0 +1,10 @@
iterations <- 100
dt <- 200
out_save <- seq(5, iterations, by = 5)
list(
timesteps = rep(dt, iterations),
store_result = TRUE,
out_save = out_save
)

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 73 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 73 KiB

View File

@ -12,9 +12,8 @@ if(DOXYGEN_FOUND)
set(DOXYGEN_PROJECT_NUMBER ${POET_VERSION})
doxygen_add_docs(doxygen
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/src
${PROJECT_SOURCE_DIR}/README.md
${PROJECT_SOURCE_DIR}/docs/Input_Scripts.md
${PROJECT_SOURCE_DIR}/docs/Output.md
COMMENT "Generate html pages")
endif()

View File

@ -1,86 +0,0 @@
# Input Scripts
In the following the expected schemes of the input scripts is described.
Therefore, each section of the input script gets its own chapter. All sections
should return a `list` as results, which are concatenated to one setup list at
the end of the file. All values must have the same name in order to get parsed
by POET.
## Grid initialization
| name | type | description |
|----------------|----------------|-----------------------------------------------------------------------|
| `n_cells` | Numeric Vector | Number of cells in each direction |
| `s_cells` | Numeric Vector | Spatial resolution of grid in each direction |
| `type` | String | Type of initialization, can be set to *scratch*, *phreeqc* or *rds* |
## Diffusion parameters
| name | type | description |
|----------------|----------------------|-------------------------------------------|
| `init` | Named Numeric Vector | Initial state for each diffused species |
| `vecinj` | Data Frame | Defining all boundary conditions row wise |
| `vecinj_inner` | List of Triples | Inner boundaries |
| `vecinj_index` | List of 4 elements | Ghost nodes boundary conditions |
| `alpha` | Named Numeric Vector | Constant alpha for each species |
### Remark on boundary conditions
Each boundary condition should be defined in `vecinj` as a data frame, where one
row holds one boundary condition.
To define inner (constant) boundary conditions, use a list of triples in
`vecinj_inner`, where each triples is defined by $(i,x,y)$. $i$ is defining the
boundary condition, referencing to the row in `vecinj`. $x$ and $y$ coordinates
then defining the position inside the grid.
Ghost nodes are set by `vecinj_index` which is a list containing boundaries for
each celestial direction (**important**: named by `N, E, S, W`). Each direction
is a numeric vector, also representing a row index of the `vecinj` data frame
for each ghost node, starting at the left-most and upper cell respectively. By
setting the boundary condition to $0$, the ghost node is set as closed boundary.
#### Example
Suppose you have a `vecinj` data frame defining 2 boundary conditions and a grid
consisting of $10 \times 10$ grid cells. Grid cell $(1,1)$ should be set to the
first boundary condition and $(5,6)$ to the second. Also, all boundary
conditions for the ghost nodes should be closed. Except the southern boundary,
which should be set to the first boundary condition injection. The following
setup describes how to setup your initial script, where `n` and `m` are the
grids cell count for each direction ($n = m = 10$):
```R
vecinj_inner <- list (
l1 = c(1, 1, 1),
l2 = c(2, 5, 6)
)
vecinj_index <- list(
"N" = rep(0, n),
"E" = rep(0, m),
"S" = rep(1, n),
"W" = rep(0, m)
)
```
## Chemistry parameters
| name | type | description |
|----------------|--------------|----------------------------------------------------------------------------------|
| `database` | String | Path to the Phreeqc database |
| `input_script` | String | Path to the Phreeqc input script |
| `dht_species` | Named Vector | Indicates significant digits to use for each species for DHT rounding. |
| `pht_species` | Named Vector | Indicates significant digits to use for each species for Interpolation rounding. |
## Final setup
| name | type | description |
|----------------|----------------|------------------------------------------------------------|
| `grid` | List | Grid parameter list |
| `diffusion` | List | Diffusion parameter list |
| `chemistry` | List | Chemistry parameter list |
| `iterations` | Numeric Value | Count of iterations |
| `timesteps` | Numeric Vector | $\Delta t$ to use for specific iteration |
| `store_result` | Boolean | Indicates if results should be stored |
| `out_save` | Numeric Vector | *optional:* At which iteration the states should be stored |

View File

@ -35,34 +35,50 @@ corresponding values can be found in `<OUTPUT_DIRECTORY>/timings.rds`
and possible to read out within a R runtime with
`readRDS("timings.rds")`. There you will find the following values:
| Value | Description |
|--------------------|----------------------------------------------------------------------------|
| simtime | time spent in whole simulation loop without any initialization and cleanup |
| simtime\_transport | measured time in *transport* subroutine |
| simtime\_chemistry | measured time in *chemistry* subroutine (actual parallelized part) |
| Value | Description |
| --------- | -------------------------------------------------------------------------- |
| simtime | time spent in whole simulation loop without any initialization and cleanup |
| chemistry | measured time in *chemistry* subroutine |
| diffusion | measured time in *diffusion* subroutine |
### chemistry subsetting
### Chemistry subsetting
If running in parallel, there are also measured timings which are subsets of
*simtime\_chemistry*.
| Value | Description |
| ------------- | --------------------------------------------------------- |
| simtime | overall runtime of chemistry |
| loop | time spent in send/recv loop of master |
| sequential | sequential part of the master (e.g. shuffling field) |
| idle\_master | idling time of the master waiting for workers |
| idle\_worker | idling time (waiting for work from master) of the workers |
| phreeqc\_time | accumulated times for Phreeqc calls of every worker |
| Value | Description |
|-----------------------|-----------------------------------------------------------|
| chemistry\_loop | time spent in send/recv loop of master |
| chemistry\_sequential | sequential part of master chemistry |
| idle\_master | idling time (waiting for any free worker) of the master |
| idle\_worker | idling time (waiting for work from master) of the workers |
| phreeqc\_time | accumulated times for Phreeqc calls of every worker |
### DHT usage {#DHT-usage}
#### DHT usage
If running in parallel and with activated DHT, two more timings and also
some profiling about the DHT usage are given:
| Value | Description |
|-----------------|---------------------------------------------------------|
| dht\_fill\_time | time to write data to DHT |
| dht\_get\_time | time to retrieve data from DHT |
| dh\_hits | count of data points retrieved from DHT |
| dht\_miss | count of misses/count of data points written to DHT |
| --------------- | ------------------------------------------------------- |
| dht\_hits | count of data points retrieved from DHT |
| dht\_evictions | count of data points evicted by another write operation |
| dht\_get\_time | time to retrieve data from DHT |
| dht\_fill\_time | time to write data to DHT |
#### Interpolation
If using interpolation, the following values are given:
| Value | Description |
| -------------- | --------------------------------------------------------------------- |
| interp\_w | time spent to write to PHT |
| interp\_r | time spent to read from DHT/PHT/Cache |
| interp\_g | time spent to gather results from DHT |
| interp\_fc | accumulated time spent in interpolation function call |
| interp\_calls | count of interpolations |
| interp\_cached | count of interpolation data sets, which were cached in the local map |
### Diffusion subsetting
| Value | Description |
| --------- | ------------------------------------------ |
| simtime | overall runtime of diffusion |

281
docs/POET.drawio Normal file

File diff suppressed because one or more lines are too long

4
docs/POET_scheme.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 813 KiB

602
docs/Scheme_POET_en.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 127 KiB

@ -1 +0,0 @@
Subproject commit ae7a13539fb71f270b87eb2e874fbac80bc8dda2

1
ext/iphreeqc Submodule

@ -0,0 +1 @@
Subproject commit 6e727e2f896e853745b4dd123c5772a9b40ad705

@ -1 +0,0 @@
Subproject commit 6ed14c35322a245e3a9776ef262c0ac0eba3b301

@ -1 +1 @@
Subproject commit 25855da6b2930559b542bbadb16299932332d6a3
Subproject commit 449647010ab9cdf9e405139f360424a2b21ab3ab

7106
include/doctest/doctest.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,169 @@
#ifndef DOCTEST_MPI_H
#define DOCTEST_MPI_H
#ifdef DOCTEST_CONFIG_IMPLEMENT
#include "doctest/extensions/mpi_sub_comm.h"
#include "mpi_reporter.h"
#include <unordered_map>
namespace doctest {
// Each time a MPI_TEST_CASE is executed on N procs,
// we need a sub-communicator of N procs to execute it.
// It is then registered here and can be re-used
// by other tests that require a sub-comm of the same size.
std::unordered_map<int,mpi_sub_comm> sub_comms_by_size;
// Counts MPI_TEST_CASEs that were registered as "skipped"
// because there are not enough procs to execute them
// (reported at the end of the run by the MPI reporters).
int nb_test_cases_skipped_insufficient_procs = 0;
// Forward declarations; the definitions follow below in this header.
std::string thread_level_to_string(int thread_lvl);
int mpi_init_thread(int argc, char *argv[], int required_thread_support);
void mpi_finalize();
// Can be safely called before MPI_Init()
// This is needed for MPI_TEST_CASE because we use doctest::skip()
// to prevent execution of tests where there is not enough procs,
// but doctest::skip() is called during test registration, that is, before main(), and hence before MPI_Init()
// NOTE: the world size is therefore read from the MPI launcher's environment
// variables rather than queried from MPI itself.
int mpi_comm_world_size() {
#if defined(OPEN_MPI)
const char* size_str = std::getenv("OMPI_COMM_WORLD_SIZE");
#elif defined(I_MPI_VERSION) || defined(MPI_VERSION) // Intel MPI + MPICH (at least)
const char* size_str = std::getenv("PMI_SIZE"); // see https://community.intel.com/t5/Intel-oneAPI-HPC-Toolkit/Environment-variables-defined-by-intel-mpirun/td-p/1096703
#else
#error "Unknown MPI implementation: please submit an issue or a PR to doctest. Meanwhile, you can look at the output of e.g. `mpirun -np 3 env` to search for an environnement variable that contains the size of MPI_COMM_WORLD and extend this code accordingly"
#endif
if (size_str==nullptr) return 1; // not launched with mpirun/mpiexec, so assume only one process
return std::stoi(size_str); // NOTE(review): std::stoi throws on a malformed value — presumably acceptable for a test driver
}
// Record size of MPI_COMM_WORLD with mpi_comm_world_size()
// (evaluated during static initialization, i.e. before MPI_Init()).
int world_size_before_init = mpi_comm_world_size();
// Convert an MPI thread-support level into its macro name for diagnostics.
// Any value that is not one of the four standard levels yields an
// explanatory placeholder string.
std::string thread_level_to_string(int thread_lvl) {
  if (thread_lvl == MPI_THREAD_SINGLE)     return "MPI_THREAD_SINGLE";
  if (thread_lvl == MPI_THREAD_FUNNELED)   return "MPI_THREAD_FUNNELED";
  if (thread_lvl == MPI_THREAD_SERIALIZED) return "MPI_THREAD_SERIALIZED";
  if (thread_lvl == MPI_THREAD_MULTIPLE)   return "MPI_THREAD_MULTIPLE";
  return "Invalid MPI thread level";
}
// Initialize MPI via MPI_Init_thread and validate that the world size guessed
// from the environment before initialization matches MPI_COMM_WORLD.
// Returns the thread support level actually provided by the MPI library.
int mpi_init_thread(int argc, char *argv[], int required_thread_support) {
int provided_thread_support;
MPI_Init_thread(&argc, &argv, required_thread_support, &provided_thread_support);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD,&world_size);
// Tests were skipped at registration time based on world_size_before_init;
// if it disagrees with the real world size, those skip decisions were wrong.
if (world_size_before_init != world_size) {
DOCTEST_INTERNAL_ERROR(
"doctest found "+std::to_string(world_size_before_init)+" MPI processes before `MPI_Init_thread`,"
" but MPI_COMM_WORLD is actually of size "+std::to_string(world_size)+".\n"
"This is most likely due to your MPI implementation not being well supported by doctest. Please report this issue on GitHub"
);
}
// Only warn (do not abort) on a weaker thread level: the tests may still run.
if (provided_thread_support!=required_thread_support) {
std::cout <<
"WARNING: " + thread_level_to_string(required_thread_support) + " was asked, "
+ "but only " + thread_level_to_string(provided_thread_support) + " is provided by the MPI library\n";
}
return provided_thread_support;
}
// Tear down MPI. The cached sub-communicators are MPI resources, so they
// must be destroyed before MPI_Finalize() is called.
void mpi_finalize() {
// We need to destroy all created sub-communicators before calling MPI_Finalize()
doctest::sub_comms_by_size.clear();
MPI_Finalize();
}
} // doctest
#else // DOCTEST_CONFIG_IMPLEMENT
#include "doctest/extensions/mpi_sub_comm.h"
#include <unordered_map>
#include <exception>
namespace doctest {
// Declarations of the shared state and helpers that are defined in the
// single DOCTEST_CONFIG_IMPLEMENT translation unit (see the branch above).
extern std::unordered_map<int,mpi_sub_comm> sub_comms_by_size;
extern int nb_test_cases_skipped_insufficient_procs;
extern int world_size_before_init;
int mpi_comm_world_size();
int mpi_init_thread(int argc, char *argv[], int required_thread_support);
void mpi_finalize();
// Run `func` as an MPI test case on a sub-communicator of exactly nb_procs
// ranks. Sub-communicators are created lazily and memoized in
// sub_comms_by_size, so all tests of the same size share one communicator.
// Ranks that are not part of the sub-communicator (comm == MPI_COMM_NULL)
// skip the test body entirely.
template<int nb_procs, class F>
void execute_mpi_test_case(F func) {
  auto it = sub_comms_by_size.find(nb_procs);
  if (it==end(sub_comms_by_size)) {
    bool was_emplaced = false;
    std::tie(it,was_emplaced) = sub_comms_by_size.emplace(std::make_pair(nb_procs,mpi_sub_comm(nb_procs)));
    assert(was_emplaced); // find() failed above, so the insertion must succeed
  }
  const mpi_sub_comm& sub = it->second;
  if (sub.comm != MPI_COMM_NULL) {
    // The last argument carries nb_procs as a compile-time constant; the
    // DOCTEST_MPI_* assertion macros use it to range-check the tested rank.
    func(sub.rank,nb_procs,sub.comm,std::integral_constant<int,nb_procs>{});
  } // (stray ';' after this block removed)
}
// Returns true when the test requires more MPI ranks than MPI_COMM_WORLD
// provides. Every skipped test is also counted in
// nb_test_cases_skipped_insufficient_procs so the reporter can warn about it.
inline bool insufficient_procs(int test_nb_procs) {
  static const int available_procs = mpi_comm_world_size();
  if (test_nb_procs <= available_procs) {
    return false;
  }
  ++nb_test_cases_skipped_insufficient_procs;
  return true;
}
} // doctest
// Rank-targeted assertion core: the assertion fires only when executed on
// `rank_to_test` of the test's sub-communicator. The static_assert rejects,
// at compile time, any rank that cannot exist in a test of this size.
#define DOCTEST_MPI_GEN_ASSERTION(rank_to_test, assertion, ...) \
static_assert(rank_to_test<test_nb_procs_as_int_constant.value,"Trying to assert on a rank greater than the number of procs of the test!"); \
if(rank_to_test == test_rank) assertion(__VA_ARGS__)
// Rank-targeted variants of the standard doctest assertion macros.
#define DOCTEST_MPI_WARN(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_WARN,__VA_ARGS__)
#define DOCTEST_MPI_CHECK(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_CHECK,__VA_ARGS__)
#define DOCTEST_MPI_REQUIRE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_REQUIRE,__VA_ARGS__)
#define DOCTEST_MPI_WARN_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_WARN_FALSE,__VA_ARGS__)
#define DOCTEST_MPI_CHECK_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_CHECK_FALSE,__VA_ARGS__)
#define DOCTEST_MPI_REQUIRE_FALSE(rank_to_test, ...) DOCTEST_MPI_GEN_ASSERTION(rank_to_test,DOCTEST_REQUIRE_FALSE,__VA_ARGS__)
// Registers an MPI test case: forward-declares the test function, registers a
// plain doctest TEST_CASE that runs it on a sub-communicator of nb_procs
// ranks (skipped when MPI_COMM_WORLD is too small), and finally opens the
// test function's definition so the user's block becomes its body.
#define DOCTEST_CREATE_MPI_TEST_CASE(name,nb_procs,func) \
static void func(DOCTEST_UNUSED int test_rank, DOCTEST_UNUSED int test_nb_procs, DOCTEST_UNUSED MPI_Comm test_comm, DOCTEST_UNUSED std::integral_constant<int,nb_procs>); \
TEST_CASE(name * doctest::description("MPI_TEST_CASE") * doctest::skip(doctest::insufficient_procs(nb_procs))) { \
doctest::execute_mpi_test_case<nb_procs>(func); \
} \
static void func(DOCTEST_UNUSED int test_rank, DOCTEST_UNUSED int test_nb_procs, DOCTEST_UNUSED MPI_Comm test_comm, DOCTEST_UNUSED std::integral_constant<int,nb_procs> test_nb_procs_as_int_constant)
// DOC: test_rank, test_nb_procs, and test_comm are available UNDER THESE SPECIFIC NAMES in the body of the unit test
// DOC: test_nb_procs_as_int_constant is equal to test_nb_procs, but as a compile time value
// (used in CHECK-like macros to assert the checked rank exists)
#define DOCTEST_MPI_TEST_CASE(name,nb_procs) \
DOCTEST_CREATE_MPI_TEST_CASE(name,nb_procs,DOCTEST_ANONYMOUS(DOCTEST_MPI_FUNC))
// == SHORT VERSIONS OF THE MACROS
#if !defined(DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES)
#define MPI_WARN DOCTEST_MPI_WARN
#define MPI_CHECK DOCTEST_MPI_CHECK
#define MPI_REQUIRE DOCTEST_MPI_REQUIRE
#define MPI_WARN_FALSE DOCTEST_MPI_WARN_FALSE
#define MPI_CHECK_FALSE DOCTEST_MPI_CHECK_FALSE
#define MPI_REQUIRE_FALSE DOCTEST_MPI_REQUIRE_FALSE
#define MPI_TEST_CASE DOCTEST_MPI_TEST_CASE
#endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES
#endif // DOCTEST_CONFIG_IMPLEMENT
#endif // DOCTEST_MPI_H

View File

@ -0,0 +1,37 @@
//
// doctest_util.h - an accompanying extensions header to the main doctest.h header
//
// Copyright (c) 2016-2023 Viktor Kirilov
//
// Distributed under the MIT Software License
// See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/MIT
//
// The documentation can be found at the library's page:
// https://github.com/doctest/doctest/blob/master/doc/markdown/readme.md
//
#ifndef DOCTEST_UTIL_H
#define DOCTEST_UTIL_H
#ifndef DOCTEST_LIBRARY_INCLUDED
#include "../doctest.h"
#endif
#include <memory>
#include <vector>
#include <string>
namespace doctest {
// Build an argv-style array of C strings over `args` and forward it to
// doctest::Context::applyCommandLine.
//
// @param ctx  the doctest context to configure
// @param args command-line arguments; args[0] is conventionally the program
//             name. NOTE(review): the pointer array is freed on return, so
//             doctest is presumed to copy what it parses — confirm if args
//             must outlive the Context.
inline void applyCommandLine(doctest::Context& ctx, const std::vector<std::string>& args) {
  auto doctest_args = std::make_unique<const char*[]>(args.size());
  for (size_t i = 0; i < args.size(); ++i) {
    doctest_args[i] = args[i].c_str();
  }
  // Context::applyCommandLine takes an int argc: cast explicitly instead of
  // narrowing implicitly (silences -Wconversion; argc never nears INT_MAX).
  ctx.applyCommandLine(static_cast<int>(args.size()), doctest_args.get());
}
} // namespace doctest
#endif // DOCTEST_UTIL_H

View File

@ -0,0 +1,271 @@
#ifndef DOCTEST_MPI_REPORTER_H
#define DOCTEST_MPI_REPORTER_H
// #include <doctest/doctest.h>
#include <fstream>
#include <string>
#include "mpi.h"
#include <vector>
#include <mutex>
namespace doctest {
// Defined in the doctest_mpi.h DOCTEST_CONFIG_IMPLEMENT translation unit.
extern int nb_test_cases_skipped_insufficient_procs;
int mpi_comm_world_size();
namespace {
// https://stackoverflow.com/a/11826666/1583122
// A stream buffer that discards everything written to it; backing store for
// NullStream below.
struct NullBuffer : std::streambuf {
  // Accept and drop every overflowing character. `override` makes the
  // relationship to std::streambuf::overflow explicit, and returning
  // not_eof(c) reports success even for c == EOF — the correct contract
  // for a sink that can never fail (returning c would signal an error
  // whenever c happens to equal traits_type::eof()).
  int overflow(int c) override { return traits_type::not_eof(c); }
};
// An ostream that swallows all output (analogous to writing to /dev/null).
class NullStream : public std::ostream {
public:
  // Passing &nullBuff before the member is constructed is safe: the base
  // only stores the streambuf pointer, it does not use it here.
  NullStream()
      : std::ostream(&nullBuff)
  {}
private:
  NullBuffer nullBuff = {};
};
static NullStream nullStream;
/* \brief Extends the ConsoleReporter of doctest
 * Each process writes its results to its own file
 * Intended to be used when a test assertion fails and the user wants to know exactly what happens on which process
 */
struct MpiFileReporter : public ConsoleReporter {
// Per-process log file; the target file is opened in the constructor body.
std::ofstream logfile_stream = {};
// NOTE(review): the base is handed logfile_stream before the member is
// constructed — safe only if ConsoleReporter merely stores the stream
// reference in its constructor; confirm against ConsoleReporter.
MpiFileReporter(const ContextOptions& co)
: ConsoleReporter(co,logfile_stream)
{
int rank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// One log file per MPI rank: doctest_<rank>.log in the working directory.
std::string logfile_name = "doctest_" + std::to_string(rank) + ".log";
logfile_stream = std::ofstream(logfile_name.c_str(), std::fstream::out);
}
};
/* \brief Extends the ConsoleReporter of doctest
* Allows to manage the execution of tests in a parallel framework
* All results are collected on rank 0
*/
struct MpiConsoleReporter : public ConsoleReporter {
private:
// Rank 0 keeps the real output stream; every other rank is redirected to a
// shared null stream so console output is produced exactly once.
static std::ostream& replace_by_null_if_not_rank_0(std::ostream* os) {
int rank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank==0) {
return *os;
} else {
return nullStream;
}
}
// Failure messages recorded by log_assert() on this rank, paired with the
// source line of the failing assertion. The line number is reused as the MPI
// message tag when the message is sent to rank 0 in test_case_end().
std::vector<std::pair<std::string, int>> m_failure_str_queue = {};
public:
MpiConsoleReporter(const ContextOptions& co)
: ConsoleReporter(co,replace_by_null_if_not_rank_0(co.cout))
{}
// Format a "file:line" (or "file(line):") prefix honoring the same
// gnu_file_line / no_line_numbers options as doctest's console reporter.
std::string file_line_to_string(const char* file, int line,
const char* tail = ""){
std::stringstream ss;
ss << skipPathFromFilename(file)
<< (opt.gnu_file_line ? ":" : "(")
<< (opt.no_line_numbers ? 0 : line) // 0 or the real num depending on the option
<< (opt.gnu_file_line ? ":" : "):") << tail;
return ss.str();
}
// Called on every rank at the end of the whole run. Per-rank statistics are
// summed (MPI_Reduce) and gathered (MPI_Gather) onto rank 0, which prints a
// global summary: total asserts, failures per rank, and overall status.
void test_run_end(const TestRunStats& p) override {
ConsoleReporter::test_run_end(p);
const bool anythingFailed = p.numTestCasesFailed > 0 || p.numAssertsFailed > 0;
// -----------------------------------------------------
// > Gather information in rank 0
int n_rank, rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &n_rank);
int g_numAsserts = 0;
int g_numAssertsFailed = 0;
int g_numTestCasesFailed = 0;
MPI_Reduce(&p.numAsserts , &g_numAsserts , 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&p.numAssertsFailed , &g_numAssertsFailed , 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&p.numTestCasesFailed, &g_numTestCasesFailed, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
std::vector<int> numAssertsFailedByRank;
if(rank == 0){
numAssertsFailedByRank.resize(static_cast<std::size_t>(n_rank));
}
MPI_Gather(&p.numAssertsFailed, 1, MPI_INT, numAssertsFailedByRank.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);
if(rank == 0) {
separator_to_stream();
s << Color::Cyan << "[doctest] " << Color::None << "assertions on all processes: " << std::setw(6)
<< g_numAsserts << " | "
<< ((g_numAsserts == 0 || anythingFailed) ? Color::None : Color::Green)
<< std::setw(6) << (g_numAsserts - g_numAssertsFailed) << " passed" << Color::None
<< " | " << (g_numAssertsFailed > 0 ? Color::Red : Color::None) << std::setw(6)
<< g_numAssertsFailed << " failed" << Color::None << " |\n";
// Warn about test cases that were skipped because they require more MPI
// processes than the program was launched with.
if (nb_test_cases_skipped_insufficient_procs>0) {
s << Color::Cyan << "[doctest] " << Color::Yellow << "WARNING: Skipped ";
if (nb_test_cases_skipped_insufficient_procs>1) {
s << nb_test_cases_skipped_insufficient_procs << " tests requiring more than ";
} else {
s << nb_test_cases_skipped_insufficient_procs << " test requiring more than ";
}
if (mpi_comm_world_size()>1) {
s << mpi_comm_world_size() << " MPI processes to run\n";
} else {
s << mpi_comm_world_size() << " MPI process to run\n";
}
}
separator_to_stream();
// List every rank that had at least one failed assertion.
if(g_numAssertsFailed > 0){
s << Color::Cyan << "[doctest] " << Color::None << "fail on rank:" << std::setw(6) << "\n";
for(std::size_t i = 0; i < numAssertsFailedByRank.size(); ++i){
if( numAssertsFailedByRank[i] > 0 ){
s << std::setw(16) << " -> On rank [" << i << "] with " << numAssertsFailedByRank[i] << " test failed" << std::endl;
}
}
}
s << Color::Cyan << "[doctest] " << Color::None
<< "Status: " << (g_numTestCasesFailed > 0 ? Color::Red : Color::Green)
<< ((g_numTestCasesFailed > 0) ? "FAILURE!" : "SUCCESS!") << Color::None << std::endl;
}
}
// Called on every rank at the end of a test case. Each rank asynchronously
// sends its queued failure messages to rank 0 (tag = assertion line number);
// rank 0 receives them all, sorts them by source rank and prints them.
void test_case_end(const CurrentTestCaseStats& st) override {
if (is_mpi_test_case()) {
// function called by every rank at the end of a test
// if failed assertions happened, they have been sent to rank 0
// here rank zero gathers them and prints them all
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
std::vector<MPI_Request> requests;
requests.reserve(m_failure_str_queue.size()); // avoid realloc & copy of MPI_Request
for (const std::pair<std::string, int> &failure : m_failure_str_queue)
{
const std::string & failure_str = failure.first;
const int failure_line = failure.second;
int failure_msg_size = static_cast<int>(failure_str.size());
requests.push_back(MPI_REQUEST_NULL);
MPI_Isend(failure_str.c_str(), failure_msg_size, MPI_BYTE,
0, failure_line, MPI_COMM_WORLD, &requests.back()); // Tag = file line
}
// Compute the number of assert with fail among all procs
const int nb_fail_asserts = static_cast<int>(m_failure_str_queue.size());
int nb_fail_asserts_glob = 0;
MPI_Reduce(&nb_fail_asserts, &nb_fail_asserts_glob, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if(rank == 0) {
MPI_Status status;
MPI_Status status_recv;
using id_string = std::pair<int,std::string>;
std::vector<id_string> msgs(static_cast<std::size_t>(nb_fail_asserts_glob));
// Probe first to learn the message size, then receive into a buffer of
// exactly that size; messages may arrive from any rank in any order.
for (std::size_t i=0; i<static_cast<std::size_t>(nb_fail_asserts_glob); ++i) {
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
int count;
MPI_Get_count(&status, MPI_BYTE, &count);
std::string recv_msg(static_cast<std::size_t>(count),'\0');
void* recv_msg_data = const_cast<char*>(recv_msg.data()); // const_cast needed. Non-const .data() exists in C++11 though...
MPI_Recv(recv_msg_data, count, MPI_BYTE, status.MPI_SOURCE,
status.MPI_TAG, MPI_COMM_WORLD, &status_recv);
msgs[i] = {status.MPI_SOURCE,recv_msg};
}
// Group the output by originating rank.
std::sort(begin(msgs),end(msgs),[](const id_string& x, const id_string& y){ return x.first < y.first; });
// print
if (nb_fail_asserts_glob>0) {
separator_to_stream();
file_line_to_stream(tc->m_file.c_str(), static_cast<int>(tc->m_line), "\n");
if(tc->m_test_suite && tc->m_test_suite[0] != '\0')
s << Color::Yellow << "TEST SUITE: " << Color::None << tc->m_test_suite << "\n";
// NOTE(review): compares 11 bytes against a 10-character literal (so the
// terminating '\0' is included); doctest's own reporter uses the
// 11-character literal "  Scenario:" — verify the literal was not
// whitespace-mangled here.
if(strncmp(tc->m_name, " Scenario:", 11) != 0)
s << Color::Yellow << "TEST CASE: ";
s << Color::None << tc->m_name << "\n\n";
for(const auto& msg : msgs) {
s << msg.second;
}
s << "\n";
}
}
// Make sure all Isends completed before the queue (their buffers) is cleared.
MPI_Waitall(static_cast<int>(requests.size()), requests.data(), MPI_STATUSES_IGNORE);
m_failure_str_queue.clear();
}
ConsoleReporter::test_case_end(st);
}
// An MPI test case is identified by the "MPI_TEST_CASE" marker stored in the
// test case description.
bool is_mpi_test_case() const {
return tc->m_description != nullptr
&& std::string(tc->m_description) == std::string("MPI_TEST_CASE");
}
// For MPI test cases, failures are not printed immediately: the formatted
// message is queued and later shipped to rank 0 in test_case_end().
void log_assert(const AssertData& rb) override {
if (!is_mpi_test_case()) {
ConsoleReporter::log_assert(rb);
} else {
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Only record successful asserts if the user asked for them (-s option).
if(!rb.m_failed && !opt.success)
return;
std::lock_guard<std::mutex> lock(mutex);
std::stringstream failure_msg;
failure_msg << Color::Red << "On rank [" << rank << "] : " << Color::None;
failure_msg << file_line_to_string(rb.m_file, rb.m_line, " ");
if((rb.m_at & (assertType::is_throws_as | assertType::is_throws_with)) ==0){
failure_msg << Color::Cyan
<< assertString(rb.m_at)
<< "( " << rb.m_expr << " ) "
<< Color::None
<< (!rb.m_failed ? "is correct!\n" : "is NOT correct!\n")
<< " values: "
<< assertString(rb.m_at)
<< "( " << rb.m_decomp.c_str() << " )\n";
}
m_failure_str_queue.push_back({failure_msg.str(), rb.m_line});
}
}
}; // MpiConsoleReporter
// Register both MPI-aware reporters with doctest under their string names.
// "1" is the priority - used for ordering when multiple reporters/listeners are used
REGISTER_REPORTER("MpiConsoleReporter", 1, MpiConsoleReporter);
REGISTER_REPORTER("MpiFileReporter", 1, MpiFileReporter);
} // anonymous
} // doctest
#endif // DOCTEST_REPORTER_H

View File

@ -0,0 +1,84 @@
#ifndef DOCTEST_MPI_SUB_COMM_H
#define DOCTEST_MPI_SUB_COMM_H
#include "mpi.h"
#include "doctest/doctest.h"
#include <cassert>
#include <string>
namespace doctest {
/// Returns the number of processes in MPI_COMM_WORLD.
inline int mpi_world_nb_procs() {
  int world_size = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
  return world_size;
}
/// RAII owner of a sub-communicator of MPI_COMM_WORLD containing exactly the
/// first `nb_procs` world ranks. On ranks outside the sub-communicator,
/// `comm` is MPI_COMM_NULL and `rank` stays -1. Movable, not copyable
/// (unique ownership of the MPI communicator handle).
struct mpi_sub_comm {
  int nb_procs;   // number of processes requested for the sub-communicator
  int rank;       // rank within `comm`, or -1 if this process is not a member
  MPI_Comm comm;  // owned handle (MPI_COMM_NULL if none)

  mpi_sub_comm( mpi_sub_comm const& ) = delete;
  mpi_sub_comm& operator=( mpi_sub_comm const& ) = delete;

  /// Split MPI_COMM_WORLD so that world ranks [0, nb_prcs) form `comm`.
  /// If the program was launched with fewer processes than requested, no
  /// communicator is created and a failing CHECK is reported on world rank 0.
  mpi_sub_comm(int nb_prcs) noexcept
    : nb_procs(nb_prcs)
    , rank(-1)
    , comm(MPI_COMM_NULL)
  {
    int comm_world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &comm_world_rank);
    if (nb_procs>mpi_world_nb_procs()) {
      if (comm_world_rank==0) {
        MESSAGE(
          "Unable to run test: need ", std::to_string(nb_procs), " procs",
          " but program launched with only ", std::to_string(doctest::mpi_world_nb_procs()), "."
        );
        CHECK(nb_procs<=mpi_world_nb_procs());
      }
    } else {
      // Ranks with color MPI_UNDEFINED get MPI_COMM_NULL from MPI_Comm_split.
      int color = MPI_UNDEFINED;
      if(comm_world_rank < nb_procs){
        color = 0;
      }
      MPI_Comm_split(MPI_COMM_WORLD, color, comm_world_rank, &comm);
      if(comm != MPI_COMM_NULL){
        MPI_Comm_rank(comm, &rank);
        // Using the world rank as the split key preserves rank order.
        assert(rank==comm_world_rank);
      }
    }
  }

  /// Free the owned communicator, if any. MPI_Comm_free resets the handle to
  /// MPI_COMM_NULL, so repeated calls are safe.
  void destroy_comm() {
    if(comm != MPI_COMM_NULL){
      MPI_Comm_free(&comm);
    }
  }

  mpi_sub_comm(mpi_sub_comm&& x) noexcept
    : nb_procs(x.nb_procs)
    , rank(x.rank)
    , comm(x.comm)
  {
    x.comm = MPI_COMM_NULL; // the moved-from object must not free the handle
  }

  mpi_sub_comm& operator=(mpi_sub_comm&& x) noexcept {
    // Self-assignment guard: without it, destroy_comm() would free the very
    // handle we are about to take over, losing the communicator.
    if (this != &x) {
      destroy_comm();
      nb_procs = x.nb_procs;
      rank = x.rank;
      comm = x.comm;
      x.comm = MPI_COMM_NULL;
    }
    return *this;
  }

  ~mpi_sub_comm() {
    destroy_comm();
  }
};
} // doctest
#endif // DOCTEST_MPI_SUB_COMM_H

View File

@ -0,0 +1,295 @@
/*
* Copyright (c), 2017, Ali Can Demiralp <ali.demiralp@rwth-aachen.de>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <vector>
#include <H5Apublic.h>
#include "H5DataType.hpp"
#include "H5DataSpace.hpp"
#include "H5Object.hpp"
#include "bits/H5Friends.hpp"
#include "bits/H5Path_traits.hpp"
namespace HighFive {
class DataSpace;
namespace detail {
/// \brief Internal hack to create an `Attribute` from an ID.
///
/// WARNING: Creating an Attribute from an ID has implications w.r.t. the lifetime of the object
/// that got passed via its ID. Using this method carelessly opens up the suite of issues
/// related to C-style resource management, including the analog of double free, dangling
/// pointers, etc.
///
/// NOTE: This is not part of the API and only serves to work around a compiler issue in GCC which
/// prevents us from using `friend`s instead. This function should only be used for internal
/// purposes. The problematic construct is:
///
/// template<class Derived>
/// friend class SomeCRTP<Derived>;
///
/// \private
Attribute make_attribute(hid_t hid);
} // namespace detail
/// \brief Class representing an Attribute of a DataSet or Group
///
/// \sa AnnotateTraits::createAttribute, AnnotateTraits::getAttribute, AnnotateTraits::listAttributeNames, AnnotateTraits::hasAttribute, AnnotateTraits::deleteAttribute for create, get, list, check or delete Attribute
class Attribute: public Object, public PathTraits<Attribute> {
  public:
    const static ObjectType type = ObjectType::Attribute;
    /// \brief Get the name of the current Attribute.
    /// \code{.cpp}
    /// auto attr = dset.createAttribute<std::string>("my_attribute", DataSpace::From(string_list));
    /// std::cout << attr.getName() << std::endl; // Will print "my_attribute"
    /// \endcode
    /// \since 2.2.2
    std::string getName() const;
    /// \brief The number of bytes required to store the attribute in the HDF5 file.
    /// \code{.cpp}
    /// size_t size = dset.createAttribute<int>("foo", DataSpace(1, 2)).getStorageSize();
    /// \endcode
    /// \since 1.0
    size_t getStorageSize() const;
    /// \brief Get the DataType of the Attribute.
    /// \code{.cpp}
    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(1, 2));
    /// auto dtype = attr.getDataType(); // Will be an hdf5 type deduced from int
    /// \endcode
    /// \since 1.0
    DataType getDataType() const;
    /// \brief Get a copy of the DataSpace of the current Attribute.
    /// \code{.cpp}
    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(1, 2));
    /// auto dspace = attr.getSpace(); // This will be a DataSpace of dimension 1 * 2
    /// \endcode
    /// \since 1.0
    DataSpace getSpace() const;
    /// \brief Get the memory DataSpace of the current Attribute.
    ///
    /// HDF5 attributes don't support selections. Therefore, there's no need
    /// for a memory dataspace. However, HighFive supports allocating arrays
    /// and checking dimensions, this requires the dimensions of the memspace.
    ///
    /// \since 1.0
    DataSpace getMemSpace() const;
    /// \brief Get the value of the Attribute.
    /// \code{.cpp}
    /// Attribute attr = dset.getAttribute("foo");
    /// // The value will contain what has been written in the attribute
    /// std::vector<int> value = attr.read<std::vector<int>>();
    /// \endcode
    /// \since 2.5.0
    template <typename T>
    T read() const;
    /// \brief Get the value of the Attribute in a buffer.
    ///
    /// Read the attribute into an existing object. Only available for
    /// supported types `T`. If `array` has preallocated the correct amount of
    /// memory, then this routine should not trigger reallocation. Otherwise,
    /// if supported, the object will be resized.
    ///
    /// An exception is raised if the numbers of dimension of the buffer and of
    /// the attribute are different.
    ///
    /// \code{.cpp}
    /// // Will read into `value` avoiding memory allocation if the dimensions
    /// // match, i.e. if the attribute `"foo"` has three element.
    /// std::vector<int> value(3);
    /// file.getAttribute("foo").read(value);
    /// \endcode
    /// \since 1.0
    template <typename T>
    void read(T& array) const;
    /// \brief Read the attribute into a pre-allocated buffer.
    /// \param array A pointer to the first byte of sufficient pre-allocated memory.
    /// \param mem_datatype The DataType of the array.
    ///
    /// \note This is the shallowest wrapper around `H5Aread`. If possible
    /// prefer either Attribute::read() const or Attribute::read(T&) const.
    ///
    /// \code{.cpp}
    /// auto attr = file.getAttribute("foo");
    ///
    /// // Simulate custom allocation by the application.
    /// size_t n_elements = attr.getSpace().getElementCount();
    /// int * ptr = (int*) malloc(n_elements*sizeof(int));
    ///
    /// // Read into the pre-allocated memory.
    /// attr.read(ptr, mem_datatype);
    /// \endcode
    /// \since 2.2.2
    template <typename T>
    void read_raw(T* array, const DataType& mem_datatype) const;
    /// \brief Read the attribute into a buffer.
    /// Behaves like Attribute::read(T*, const DataType&) const but
    /// additionally this overload deduces the memory datatype from `T`.
    ///
    /// \param array Pointer to the first byte of pre-allocated memory.
    ///
    /// \note If possible prefer either Attribute::read() const or Attribute::read(T&) const.
    ///
    /// \code{.cpp}
    /// auto attr = file.getAttribute("foo");
    ///
    /// // Simulate custom allocation by the application.
    /// size_t n_elements = attr.getSpace().getElementCount();
    /// int * ptr = (int*) malloc(n_elements*sizeof(int));
    ///
    /// // Read into the pre-allocated memory.
    /// attr.read(ptr);
    /// \endcode
    /// \since 2.2.2
    template <typename T>
    void read_raw(T* array) const;
    /// \brief Write the value into the Attribute.
    ///
    /// Write the value to the attribute. For supported types `T`, this overload
    /// will write the value to the attribute. The datatype and dataspace are
    /// deduced automatically. However, since the attribute has already been
    /// created, the dimensions of `value` must match those of the attribute.
    ///
    /// \code{.cpp}
    /// // Prefer the fused version if creating and writing the attribute
    /// // at the same time.
    /// dset.createAttribute("foo", std::vector<int>{1, 2, 3});
    ///
    /// // To overwrite the value:
    /// std::vector<int> value{4, 5, 6};
    /// dset.getAttribute<int>("foo").write(value);
    /// \endcode
    /// \since 1.0
    template <typename T>
    void write(const T& value);
    /// \brief Write from a raw pointer.
    ///
    /// Values that have been correctly arranged in memory can be written directly
    /// by passing a raw pointer.
    ///
    /// \param buffer Pointer to the first byte of the value.
    /// \param mem_datatype The DataType of the buffer.
    ///
    /// \note This is the shallowest wrapper around `H5Awrite`. It's useful
    /// if you need full control. If possible prefer Attribute::write.
    ///
    /// \code{.cpp}
    /// Attribute attr = dset.createAttribute<int>("foo", DataSpace(2, 3));
    ///
    /// // Simulate the application creating `value` and only exposing access
    /// // to the raw pointer `ptr`.
    /// std::vector<std::array<int, 3>> value{{1, 2, 3}, {4, 5, 6}};
    /// int * ptr = (int*) value.data();
    ///
    /// // Simply write the bytes to disk.
    /// attr.write(ptr, AtomicType<int>());
    /// \endcode
    /// \since 2.2.2
    template <typename T>
    void write_raw(const T* buffer, const DataType& mem_datatype);
    /// \brief Write from a raw pointer.
    ///
    /// Much like Attribute::write_raw(const T*, const DataType&).
    /// Additionally, this overload attempts to automatically deduce the
    /// datatype of the buffer. Note, that the file datatype is already set.
    ///
    /// \param buffer Pointer to the first byte.
    ///
    /// \note If possible prefer Attribute::write.
    ///
    /// \code{.cpp}
    /// // Simulate the application creating `value` and only exposing access
    /// // to the raw pointer `ptr`.
    /// std::vector<std::array<int, 3>> value{{1, 2, 3}, {4, 5, 6}};
    /// int * ptr = (int*) value.data();
    ///
    /// // Simply write the bytes to disk.
    /// attr.write(ptr);
    /// \endcode
    /// \since 2.2.2
    template <typename T>
    void write_raw(const T* buffer);
    /// \brief The create property list used for this attribute.
    ///
    /// Some of HDF5 properties/setting of an attribute are defined by a
    /// create property list. This method returns a copy of the create
    /// property list used during creation of the attribute.
    ///
    /// \code{.cpp}
    /// auto acpl = attr.getCreatePropertyList();
    ///
    /// // For example to create another attribute with the same properties.
    /// file.createAttribute("foo", 42, acpl);
    /// \endcode
    /// \since 2.5.0
    AttributeCreateProps getCreatePropertyList() const {
        return details::get_plist<AttributeCreateProps>(*this, H5Aget_create_plist);
    }
    // No empty attributes
    Attribute() = delete;
    ///
    /// \brief Return an `Attribute` with `axes` squeezed from the memspace.
    ///
    /// Returns an `Attribute` in which the memspace has been modified
    /// to not include the axes listed in `axes`.
    ///
    /// Throws if any axis to be squeezed has a dimension other than `1`.
    ///
    /// \since 3.0
    Attribute squeezeMemSpace(const std::vector<size_t>& axes) const;
    ///
    /// \brief Return an `Attribute` with a simple memspace with `dims`.
    ///
    /// Returns an `Attribute` in which the memspace has been modified
    /// to be a simple dataspace with dimensions `dims`.
    ///
    /// Throws if the number of elements changes.
    ///
    /// \since 3.0
    Attribute reshapeMemSpace(const std::vector<size_t>& dims) const;
  protected:
    using Object::Object;
  private:
    // Memory dataspace — presumably the override produced by
    // squeezeMemSpace()/reshapeMemSpace(); TODO(review): confirm against
    // the implementation of getMemSpace().
    DataSpace _mem_space;
#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
    template <typename Derivate>
    friend class ::HighFive::AnnotateTraits;
#endif
    friend Attribute detail::make_attribute(hid_t);
};
namespace detail {
// Wrap an existing attribute hid in an `Attribute`; no additional reference
// is visible here, so the caller is responsible for correct ownership of the
// id (see the warning on the forward declaration of make_attribute).
inline Attribute make_attribute(hid_t hid) {
    return Attribute(hid);
}
}  // namespace detail
} // namespace HighFive

View File

@ -0,0 +1,114 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <vector>
#include "H5DataSpace.hpp"
#include "H5DataType.hpp"
#include "H5Object.hpp"
#include "bits/H5_definitions.hpp"
#include "bits/H5Annotate_traits.hpp"
#include "bits/H5Slice_traits.hpp"
#include "bits/H5Path_traits.hpp"
#include "bits/H5_definitions.hpp"
namespace HighFive {
///
/// \brief Class representing a dataset.
///
class DataSet: public Object,
               public SliceTraits<DataSet>,
               public AnnotateTraits<DataSet>,
               public PathTraits<DataSet> {
  public:
    const static ObjectType type = ObjectType::Dataset;
    ///
    /// \brief getStorageSize
    /// \return returns the amount of storage allocated for a dataset.
    ///
    uint64_t getStorageSize() const;
    ///
    /// \brief getOffset
    /// \return returns DataSet address in file
    ///
    uint64_t getOffset() const;
    ///
    /// \brief getDataType
    /// \return return the datatype associated with this dataset
    ///
    DataType getDataType() const;
    ///
    /// \brief getSpace
    /// \return return the dataspace associated with this dataset
    ///
    DataSpace getSpace() const;
    ///
    /// \brief getMemSpace
    /// \return same as getSpace for DataSet, compatibility with Selection
    /// class
    ///
    DataSpace getMemSpace() const;
    /// \brief Change the size of the dataset
    ///
    /// This requires that the dataset was created with chunking, and you would
    /// generally want to have set a larger maxdims setting
    /// \param dims New size of the dataset
    void resize(const std::vector<size_t>& dims);
    /// \brief Get the dimensions of the whole DataSet.
    /// This is a shorthand for getSpace().getDimensions()
    /// \return The shape of the current HighFive::DataSet
    ///
    inline std::vector<size_t> getDimensions() const {
        return getSpace().getDimensions();
    }
    /// \brief Get the total number of elements in the current dataset.
    /// E.g. 2x2x2 matrix has size 8.
    /// This is a shorthand for getSpace().getTotalCount()
    /// \return The shape of the current HighFive::DataSet
    ///
    inline size_t getElementCount() const {
        return getSpace().getElementCount();
    }
    /// \brief Get the list of properties for creation of this dataset
    DataSetCreateProps getCreatePropertyList() const {
        return details::get_plist<DataSetCreateProps>(*this, H5Dget_create_plist);
    }
    /// \brief Get the list of properties for access of this dataset
    DataSetAccessProps getAccessPropertyList() const {
        return details::get_plist<DataSetAccessProps>(*this, H5Dget_access_plist);
    }
    DataSet() = default;
  protected:
    using Object::Object;  // bring DataSet(hid_t)
    explicit DataSet(Object&& o) noexcept
        : Object(std::move(o)) {}
    friend class Reference;
    template <typename Derivate>
    friend class NodeTraits;
};
} // namespace HighFive

View File

@ -0,0 +1,282 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <vector>
#include <array>
#include <cstdint>
#include <type_traits>
#include <initializer_list>
#include "H5Object.hpp"
#include "bits/H5_definitions.hpp"
namespace HighFive {
namespace detail {
/// @brief Create a HighFive::DataSpace from an HID, without incrementing the id.
///
/// @note This is internal API and subject to change.
/// @internal
DataSpace make_data_space(hid_t hid);
} // namespace detail
/// \brief Class representing the space (dimensions) of a DataSet
///
/// \code{.cpp}
/// // Create a DataSpace of dimension 1 x 2 x 3
/// DataSpace dspace(1, 2, 3);
/// std::cout << dspace.getElementCount() << std::endl; // Print 1 * 2 * 3 = 6
/// std::cout << dspace.getNumberDimensions() << std::endl; // Print 3
/// std::vector<size_t> dims = dspace.getDimensions(); // dims is {1, 2, 3}
/// \endcode
class DataSpace: public Object {
  public:
    const static ObjectType type = ObjectType::DataSpace;
    /// \brief Magic value to specify that a DataSpace can grow without limit.
    ///
    /// This value should be used with DataSpace::DataSpace(const std::vector<size_t>& dims, const
    /// std::vector<size_t>& maxdims);
    ///
    /// \since 2.0
    static const size_t UNLIMITED = SIZE_MAX;
    /// \brief An enum to create scalar and null DataSpace with DataSpace::DataSpace(DataspaceType dtype).
    ///
    /// This enum is needed otherwise we will not be able to distinguish between both with normal
    /// constructors. Both have a dimension of 0.
    /// \since 1.3
    enum class DataspaceType {
        dataspace_scalar,  ///< Value to create scalar DataSpace
        dataspace_null,    ///< Value to create null DataSpace
        // simple dataspaces are handled directly from their dimensions
    };
    // For backward compatibility: `DataSpace::dataspace_scalar`.
    constexpr static DataspaceType dataspace_scalar = DataspaceType::dataspace_scalar;
    constexpr static DataspaceType dataspace_null = DataspaceType::dataspace_null;
    /// \brief Create a DataSpace of N-dimensions from a std::vector<size_t>.
    /// \param dims Dimensions of the new DataSpace
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3
    /// DataSpace(std::vector<size_t>{1, 3});
    /// \endcode
    /// \since 1.0
    explicit DataSpace(const std::vector<size_t>& dims);
    /// \brief Create a DataSpace of N-dimensions from a std::array<size_t, N>.
    /// \param dims Dimensions of the new DataSpace
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3
    /// DataSpace(std::array<size_t, 2>{1, 3});
    /// \endcode
    /// \since 2.3
    template <size_t N>
    explicit DataSpace(const std::array<size_t, N>& dims);
    /// \brief Create a DataSpace of N-dimensions from an initializer list.
    /// \param dims Dimensions of the new DataSpace
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3
    /// DataSpace{1, 3};
    /// \endcode
    /// \since 2.1
    DataSpace(const std::initializer_list<size_t>& dims);
    /// \brief Create a DataSpace of N-dimensions from direct values.
    /// \param dim1 The first dimension
    /// \param dims The following dimensions
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3
    /// DataSpace(1, 3);
    /// \endcode
    /// \since 2.1
    template <typename... Args>
    explicit DataSpace(size_t dim1, Args... dims);
    /// \brief Create a DataSpace from a pair of iterators.
    /// \param begin The beginning of the container
    /// \param end The end of the container
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3
    /// std::vector<int> v{1, 3};
    /// DataSpace(v.begin(), v.end());
    /// \endcode
    ///
    /// \since 2.0
    // Attention: Explicitly disable DataSpace(int_like, int_like) from trying
    // to use this constructor
    template <typename IT,
              typename = typename std::enable_if<!std::is_integral<IT>::value, IT>::type>
    DataSpace(IT begin, IT end);
    /// \brief Create a resizable N-dimensional DataSpace.
    /// \param dims Initial size of dataspace
    /// \param maxdims Maximum size of the dataspace
    ///
    /// \code{.cpp}
    /// // Create a DataSpace with 2 dimensions: 1 and 3.
    /// // It can later be resized up to a maximum of 10 x 10
    /// DataSpace(std::vector<size_t>{1, 3}, std::vector<size_t>{10, 10});
    /// \endcode
    ///
    /// \see UNLIMITED for a DataSpace that can be resized without limit.
    /// \since 2.0
    explicit DataSpace(const std::vector<size_t>& dims, const std::vector<size_t>& maxdims);
    /// \brief Create a scalar or a null DataSpace.
    ///
    /// This overload enables creating scalar or null data spaces, both have
    /// dimension 0.
    ///
    /// \param space_type The value from the enum
    ///
    /// \code{.cpp}
    /// DataSpace(DataspaceType::dataspace_scalar);
    /// \endcode
    ///
    /// \attention Avoid braced initialization in these cases, i.e.
    /// \code{.cpp}
    /// // This is not a scalar dataset:
    /// DataSpace{DataspaceType::dataspace_scalar};
    /// \endcode
    ///
    /// \since 1.3
    explicit DataSpace(DataspaceType space_type);
    /// \brief Create a scalar DataSpace.
    ///
    /// \code{.cpp}
    /// auto dataspace = DataSpace::Scalar();
    /// \endcode
    ///
    /// \since 2.9
    static DataSpace Scalar();
    /// \brief Create a null DataSpace.
    ///
    /// \code{.cpp}
    /// auto dataspace = DataSpace::Null();
    /// \endcode
    ///
    /// \since 2.9
    static DataSpace Null();
    /// \brief Create a copy of the DataSpace which will have different id.
    ///
    /// \code{.cpp}
    /// DataSpace dspace1(1, 3);
    /// auto dspace2 = dspace1.clone();
    /// \endcode
    ///
    /// \since 1.0
    DataSpace clone() const;
    /// \brief Returns the number of dimensions of a DataSpace.
    /// \code{.cpp}
    /// DataSpace dspace(1, 3);
    /// size_t number_of_dim = dspace.getNumberDimensions(); // returns 2
    /// \endcode
    /// \since 1.0
    size_t getNumberDimensions() const;
    /// \brief Returns the size of the dataset in each dimension.
    ///
    /// For zero-dimensional datasets (e.g. scalar or null datasets) an empty
    /// vector is returned.
    ///
    /// \code{.cpp}
    /// DataSpace dspace(1, 3);
    /// auto dims = dspace.getDimensions(); // returns {1, 3}
    /// \endcode
    ///
    /// \sa DataSpace::getMaxDimensions
    ///
    /// \since 1.0
    std::vector<size_t> getDimensions() const;
    /// \brief Return the number of elements in this DataSpace.
    ///
    /// \code{.cpp}
    /// DataSpace dspace(1, 3);
    /// size_t elementcount = dspace.getElementCount(); // return 1 x 3 = 3
    /// \endcode
    /// \since 2.1
    size_t getElementCount() const;
    /// \brief Returns the maximum size of the dataset in each dimension.
    ///
    /// This is the maximum size a dataset can be extended to, which may be
    /// different from the current size of the dataset.
    ///
    /// \code{.cpp}
    /// DataSpace dspace(std::vector<size_t>{1, 3}, std::vector<size_t>{UNLIMITED, 10});
    /// dspace.getMaxDimensions(); // Return {UNLIMITED, 10}
    /// \endcode
    ///
    /// \sa DataSpace::getDimensions
    /// \since 2.0
    std::vector<size_t> getMaxDimensions() const;
    /// \brief Automatically deduce the DataSpace from a container/value.
    ///
    /// Certain containers and scalar values are fully supported by HighFive.
    /// For these containers, HighFive can deduce the dimensions from `value`.
    ///
    /// \code{.cpp}
    /// double d = 42.0;
    /// std::vector<std::vector<int>> v = {{4, 5, 6}, {7, 8, 9}};
    /// DataSpace::From(v); // A DataSpace of dimensions 2, 3.
    /// DataSpace::From(d); // A scalar dataspace.
    /// \endcode
    ///
    /// \since 1.0
    template <typename T>
    static DataSpace From(const T& value);
    /// \brief Create a DataSpace from a value of type string array.
    /// \param string_array A C-array of C-strings (null-terminated).
    ///
    /// \code{.cpp}
    /// char string_array[2][10] = {"123456789", "abcdefghi"};
    /// auto dspace = DataSpace::FromCharArrayStrings(string_array); // dspace is a DataSpace of
    /// dimensions 2
    /// \endcode
    /// \since 2.2
    template <std::size_t N, std::size_t Width>
    static DataSpace FromCharArrayStrings(const char (&string_array)[N][Width]);
  protected:
    DataSpace() = default;
    // Adopt an existing hid without taking an additional reference.
    static DataSpace fromId(hid_t hid) {
        DataSpace space;
        space._hid = hid;
        return space;
    }
    friend class Attribute;
    friend class File;
    friend class DataSet;
    friend DataSpace detail::make_data_space(hid_t hid);
};
} // namespace HighFive
// We include bits right away since DataSpace is user-constructible
#include "bits/H5Dataspace_misc.hpp"

View File

@ -0,0 +1,367 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <type_traits>
#include <vector>
#include <H5Tpublic.h>
#include "H5Object.hpp"
#include "bits/H5Utils.hpp"
#include "bits/string_padding.hpp"
#include "H5PropertyList.hpp"
#include "bits/h5_wrapper.hpp"
#include "bits/h5t_wrapper.hpp"
namespace HighFive {
///
/// \brief Enum of Fundamental data classes
///
// Each class is a distinct bit so that values can be combined and tested via
// the bitwise operator| / operator& overloads declared below.
enum class DataTypeClass {
    Time = 1 << 1,
    Integer = 1 << 2,
    Float = 1 << 3,
    String = 1 << 4,
    BitField = 1 << 5,
    Opaque = 1 << 6,
    Compound = 1 << 7,
    Reference = 1 << 8,
    Enum = 1 << 9,
    VarLen = 1 << 10,
    Array = 1 << 11,
    Invalid = 0
};
/// Bitwise-or of two DataTypeClass flag values.
inline DataTypeClass operator|(DataTypeClass lhs, DataTypeClass rhs) {
    using underlying = std::underlying_type<DataTypeClass>::type;
    const auto combined = static_cast<underlying>(lhs) | static_cast<underlying>(rhs);
    return static_cast<DataTypeClass>(combined);
}
/// Bitwise-and of two DataTypeClass flag values.
inline DataTypeClass operator&(DataTypeClass lhs, DataTypeClass rhs) {
    using underlying = std::underlying_type<DataTypeClass>::type;
    const auto masked = static_cast<underlying>(lhs) & static_cast<underlying>(rhs);
    return static_cast<DataTypeClass>(masked);
}
class StringType;
///
/// \brief HDF5 Data Type
///
class DataType: public Object {
  public:
    // Two DataTypes are equal when the underlying HDF5 types compare equal.
    bool operator==(const DataType& other) const;
    bool operator!=(const DataType& other) const;
    ///
    /// \brief Return the fundamental type.
    ///
    DataTypeClass getClass() const;
    ///
    /// \brief Returns the length (in bytes) of this type elements
    ///
    /// Notice that the size of variable length sequences may have limited applicability
    /// given that it refers to the size of the control structure. For info see
    /// https://support.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-GetSize
    size_t getSize() const;
    ///
    /// \brief Returns a friendly description of the type (e.g. Float32)
    ///
    std::string string() const;
    ///
    /// \brief Returns whether the type is a variable-length string
    ///
    bool isVariableStr() const;
    ///
    /// \brief Returns whether the type is a fixed-length string
    ///
    bool isFixedLenStr() const;
    ///
    /// \brief Returns this datatype as a `StringType`.
    ///
    StringType asStringType() const;
    ///
    /// \brief Check the DataType was default constructed.
    ///
    bool empty() const noexcept;
    /// \brief Returns whether the type is a Reference
    bool isReference() const;
    /// \brief Get the list of properties for creation of this DataType
    DataTypeCreateProps getCreatePropertyList() const {
        return details::get_plist<DataTypeCreateProps>(*this, H5Tget_create_plist);
    }
  protected:
    using Object::Object;
    friend class Attribute;
    friend class File;
    friend class DataSet;
    friend class CompoundType;
    template <typename Derivate>
    friend class NodeTraits;
};
/// \brief Character set of an HDF5 string datatype (maps to H5T_cset_t).
enum class CharacterSet : std::underlying_type<H5T_cset_t>::type {
    Ascii = H5T_CSET_ASCII,
    Utf8 = H5T_CSET_UTF8,
};
class StringType: public DataType {
  public:
    ///
    /// \brief For strings return the character set.
    ///
    CharacterSet getCharacterSet() const;
    ///
    /// \brief For fixed length strings return the padding.
    ///
    StringPadding getPadding() const;
  protected:
    using DataType::DataType;
    friend class DataType;
};
class FixedLengthStringType: public StringType {
  public:
    ///
    /// \brief Create a fixed length string datatype.
    ///
    /// The string will be `size` bytes long, regardless whether it's ASCII or
    /// UTF8. In particular, a string with `n` UTF8 characters in general
    /// requires `4*n` bytes.
    ///
    /// The string padding is subtle, essentially it's just a hint. While
    /// commonly, a null-terminated string is guaranteed to have one `'\0'`
    /// which marks the semantic end of the string, this is not enforced by
    /// HDF5. In fact, there are HDF5 files that contain strings that claim to
    /// be null-terminated but aren't. The length of the buffer must be at
    /// least `size` bytes regardless of the padding. HDF5 will read or write
    /// `size` bytes, irrespective of when (if at all) the `\0` occurs.
    ///
    /// Note that when writing, passing `StringPadding::NullTerminated` is a
    /// guarantee to the reader that it contains a `\0`. Therefore, make sure
    /// that the string really is null-terminated. Otherwise prefer a
    /// null-padded string. This merely states that the buffer is filled up
    /// with 0 or more `\0`.
    FixedLengthStringType(size_t size,
                          StringPadding padding,
                          CharacterSet character_set = CharacterSet::Ascii);
};
class VariableLengthStringType: public StringType {
  public:
    ///
    /// \brief Create a variable length string HDF5 datatype.
    ///
    /// \param character_set ASCII or UTF-8
    explicit VariableLengthStringType(CharacterSet character_set = CharacterSet::Ascii);
};
///
/// \brief create an HDF5 DataType from a C++ type
///
/// Supports only basic (scalar) data types.
///
template <typename T>
class AtomicType: public DataType {
  public:
    AtomicType();
    // The C++ type this HDF5 datatype represents.
    using basic_type = T;
};
///
/// \brief Create a compound HDF5 datatype
///
class CompoundType: public DataType {
  public:
    ///
    /// \brief Use for defining a sub-type of compound type
    struct member_def {
        member_def(std::string t_name, DataType t_base_type, size_t t_offset = 0)
            : name(std::move(t_name))
            , base_type(std::move(t_base_type))
            , offset(t_offset) {}
        std::string name;    ///< Name of the member inside the compound type
        DataType base_type;  ///< HDF5 datatype of the member
        size_t offset;       ///< Byte offset of the member inside the compound layout
    };
    ///
    /// \brief Initializes a compound type from a vector of member definitions
    /// \param t_members member definitions (name, type, offset)
    /// \param size total size in bytes; 0 lets the implementation derive it
    inline CompoundType(const std::vector<member_def>& t_members, size_t size = 0)
        : members(t_members) {
        create(size);
    }
    inline CompoundType(std::vector<member_def>&& t_members, size_t size = 0)
        : members(std::move(t_members)) {
        create(size);
    }
    inline CompoundType(const std::initializer_list<member_def>& t_members, size_t size = 0)
        : members(t_members) {
        create(size);
    }
    ///
    /// \brief Initializes a compound type from a DataType
    /// \param type a datatype whose class must be Compound
    /// \throws DataTypeException if `type` does not refer to a compound datatype
    inline explicit CompoundType(DataType&& type)
        // Move the rvalue into the base instead of copying it; the copy would
        // needlessly bump and then drop the HDF5 reference count of the hid.
        : DataType(std::move(type)) {
        if (getClass() != DataTypeClass::Compound) {
            std::ostringstream ss;
            ss << "hid " << _hid << " does not refer to a compound data type";
            throw DataTypeException(ss.str());
        }
        size_t n_members = static_cast<size_t>(detail::h5t_get_nmembers(_hid));
        members.reserve(n_members);
        for (unsigned i = 0; i < n_members; i++) {
            char* name = detail::h5t_get_member_name(_hid, i);
            size_t offset = detail::h5t_get_member_offset(_hid, i);
            hid_t member_hid = detail::h5t_get_member_type(_hid, i);
            DataType member_type{member_hid};
            // Move the freshly wrapped member type into the member_def rather
            // than copying it (avoids an extra refcount increment/decrement).
            members.emplace_back(std::string(name), std::move(member_type), offset);
            detail::h5_free_memory(name);
        }
    }
    /// \brief Commit datatype into the given Object
    /// \param object Location to commit object into
    /// \param name Name to give the datatype
    inline void commit(const Object& object, const std::string& name) const;
    /// \brief Get read access to the CompoundType members
    inline const std::vector<member_def>& getMembers() const noexcept {
        return members;
    }
  private:
    /// A vector of the member_def members of this CompoundType
    std::vector<member_def> members;
    /// \brief Automatically create the type from the set of members
    /// using standard struct alignment.
    /// \param size Total size of data type
    void create(size_t size = 0);
};
///
/// \brief Create an enum HDF5 datatype
///
/// \code{.cpp}
/// enum class Position {
///     FIRST = 1,
///     SECOND = 2,
/// };
///
/// EnumType<Position> create_enum_position() {
///     return {{"FIRST", Position::FIRST},
///             {"SECOND", Position::SECOND}};
/// }
///
/// // You have to register the type inside HighFive
/// HIGHFIVE_REGISTER_TYPE(Position, create_enum_position)
///
/// void write_first(HighFive::File& file) {
///     auto dataset = file.createDataSet("/foo", Position::FIRST);
/// }
/// \endcode
template <typename T>
class EnumType: public DataType {
  public:
    ///
    /// \brief Use for defining a member of enum type
    struct member_def {
        member_def(const std::string& t_name, T t_value)
            : name(t_name)
            , value(std::move(t_value)) {}
        std::string name;  ///< Symbolic name of the enumerator
        T value;           ///< Enumerator value
    };
    EnumType(const EnumType& other) = default;
    /// \brief Build the enum datatype from its member definitions.
    /// \throws DataTypeException (via HDF5ErrMapper) if `t_members` is empty
    EnumType(const std::vector<member_def>& t_members)
        : members(t_members) {
        static_assert(std::is_enum<T>::value, "EnumType<T>::create takes only enum");
        if (members.empty()) {
            HDF5ErrMapper::ToException<DataTypeException>(
                "Could not create an enum without members");
        }
        create();
    }
    EnumType(std::initializer_list<member_def> t_members)
        : EnumType(std::vector<member_def>(t_members)) {}
    /// \brief Commit datatype into the given Object
    /// \param object Location to commit object into
    /// \param name Name to give the datatype
    void commit(const Object& object, const std::string& name) const;
  private:
    std::vector<member_def> members;
    /// \brief Register the members with HDF5 to build the enum type.
    void create();
};
/// \brief Create a DataType instance representing type T
///
/// Specializations are provided for basic types and can be added for user
/// types via HIGHFIVE_REGISTER_TYPE.
template <typename T>
DataType create_datatype();
/// \brief Create a DataType instance representing type T and perform a sanity check on its size
template <typename T>
DataType create_and_check_datatype();
} // namespace HighFive
/// \brief Macro to extend datatype of HighFive
///
/// This macro has to be called outside of any namespace.
///
/// \code{.cpp}
/// namespace app {
/// enum FooBar { FOO = 1, BAR = 2 };
/// EnumType<FooBar> create_enum_foobar() {
///     return EnumType<FooBar>({{"FOO", FooBar::FOO},
///                              {"BAR", FooBar::BAR}});
/// }
/// }
///
/// HIGHFIVE_REGISTER_TYPE(FooBar, ::app::create_enum_foobar)
/// \endcode
#define HIGHFIVE_REGISTER_TYPE(type, function)                    \
    template <>                                                   \
    inline HighFive::DataType HighFive::create_datatype<type>() { \
        return function();                                        \
    }
#include "bits/H5DataType_misc.hpp"

398
include/highfive/H5Easy.hpp Normal file
View File

@ -0,0 +1,398 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
/// \brief
/// Read/dump DataSets or Attribute using a minimalistic syntax.
/// To this end, the functions are templated, and accept:
/// - Any type accepted by HighFive
/// - Eigen objects
/// - xtensor objects
/// - OpenCV objects
#pragma once
#include <string>
#include <vector>
// optionally enable xtensor plug-in and load the library
#ifdef XTENSOR_VERSION_MAJOR
#ifndef H5_USE_XTENSOR
#define H5_USE_XTENSOR
#endif
#endif
#ifdef H5_USE_XTENSOR
#include "xtensor.hpp"
#endif
// optionally enable Eigen plug-in and load the library
#ifdef EIGEN_WORLD_VERSION
#ifndef H5_USE_EIGEN
#define H5_USE_EIGEN
#endif
#endif
#ifdef H5_USE_EIGEN
#include <Eigen/Eigen>
#include "eigen.hpp"
#endif
// optionally enable OpenCV plug-in and load the library
#ifdef CV_MAJOR_VERSION
#ifndef H5_USE_OPENCV
#define H5_USE_OPENCV
#endif
#endif
#ifdef H5_USE_OPENCV
#include <opencv2/opencv.hpp>
#include "experimental/opencv.hpp"
#endif
#include "H5File.hpp"
namespace H5Easy {
using HighFive::AtomicType;
using HighFive::Attribute;
using HighFive::Chunking;
using HighFive::DataSet;
using HighFive::DataSetCreateProps;
using HighFive::DataSpace;
using HighFive::Deflate;
using HighFive::Exception;
using HighFive::File;
using HighFive::ObjectType;
using HighFive::Shuffle;
///
/// \brief Write mode for DataSets
///
/// Passed to H5Easy::dump to choose between create-only and overwrite.
enum class DumpMode {
    Create = 0, /*!< Dump only if DataSet does not exist, otherwise throw. */
    Overwrite = 1 /*!< Create or overwrite if DataSet of correct shape exists, otherwise throw. */
};
///
/// \brief Signal to enable/disable automatic flushing after write operations.
///
/// DumpOptions defaults to Flush::True.
enum class Flush {
    False = 0, /*!< No automatic flushing. */
    True = 1 /*!< Automatic flushing. */
};
///
/// \brief Signal to set compression level for written DataSets.
class Compression {
  public:
    ///
    /// \brief Enable compression with the highest compression level (9).
    /// or disable compression (set compression level to 0).
    ///
    /// \param enable ``true`` to enable with highest compression level
    explicit Compression(bool enable = true);
    ///
    /// \brief Set compression level.
    ///
    /// NOTE(review): intentionally non-explicit so integers convert to
    /// Compression in DumpOptions::set(...) — confirm before marking explicit.
    ///
    /// \param level the compression level
    template <class T>
    Compression(T level);
    ///
    /// \brief Return compression level.
    inline unsigned get() const;
  private:
    unsigned m_compression_level;
};
///
/// \brief Define options for dumping data.
///
/// By default:
/// - DumpMode::Create
/// - Flush::True
/// - Compression: false
/// - ChunkSize: automatic
class DumpOptions {
  public:
    ///
    /// \brief Constructor: accept all default settings.
    DumpOptions() = default;
    ///
    /// \brief Constructor: overwrite (some of the) defaults.
    /// \param args any of DumpMode(), Flush(), Compression() in arbitrary number and order.
    template <class... Args>
    DumpOptions(Args... args) {
        set(args...);
    }
    ///
    /// \brief Overwrite H5Easy::DumpMode setting.
    /// \param mode DumpMode.
    inline void set(DumpMode mode);
    ///
    /// \brief Overwrite H5Easy::Flush setting.
    /// \param mode Flush.
    inline void set(Flush mode);
    ///
    /// \brief Overwrite H5Easy::Compression setting.
    /// \param level Compression.
    inline void set(const Compression& level);
    ///
    /// \brief Overwrite any setting(s).
    /// \param arg any of DumpMode(), Flush(), Compression in arbitrary number and order.
    /// \param args any of DumpMode(), Flush(), Compression in arbitrary number and order.
    template <class T, class... Args>
    inline void set(T arg, Args... args);
    ///
    /// \brief Set chunk-size. If the input is rank (size) zero, automatic chunking is enabled.
    /// \param shape Chunk size along each dimension.
    template <class T>
    inline void setChunkSize(const std::vector<T>& shape);
    ///
    /// \brief Set chunk-size. If the input is rank (size) zero, automatic chunking is enabled.
    /// \param shape Chunk size along each dimension.
    inline void setChunkSize(std::initializer_list<size_t> shape);
    ///
    /// \brief Get overwrite-mode.
    /// \return bool
    inline bool overwrite() const;
    ///
    /// \brief Get flush-mode.
    /// \return bool
    inline bool flush() const;
    ///
    /// \brief Get compress-mode.
    /// \return bool
    inline bool compress() const;
    ///
    /// \brief Get compression level.
    /// \return [0..9]
    inline unsigned getCompressionLevel() const;
    ///
    /// \brief Get chunking mode: ``true`` is manually set, ``false`` if chunk-size should be
    /// computed automatically.
    /// \return bool
    inline bool isChunked() const;
    ///
    /// \brief Get chunk size. Use DumpOptions::isChunked to check if chunk-size should
    /// be automatically computed.
    inline std::vector<hsize_t> getChunkSize() const;
  private:
    bool m_overwrite = false;                 // DumpMode::Create by default
    bool m_flush = true;                      // Flush::True by default
    unsigned m_compression_level = 0;         // 0 == no compression
    std::vector<hsize_t> m_chunk_size = {};   // empty == automatic chunking
};
///
/// \brief Get the size of an existing DataSet in an open HDF5 file.
///
/// \param file opened file (has to be readable)
/// \param path path of the DataSet
///
/// \return Size of the DataSet
inline size_t getSize(const File& file, const std::string& path);
///
/// \brief Get the shape of an existing DataSet in a readable file.
///
/// \param file opened file (has to be readable)
/// \param path Path of the DataSet
///
/// \return the shape of the DataSet
inline std::vector<size_t> getShape(const File& file, const std::string& path);
///
/// \brief Write object (templated) to a (new) DataSet in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param mode write mode
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file,
                    const std::string& path,
                    const T& data,
                    DumpMode mode = DumpMode::Create);
///
/// \brief Write object (templated) to a (new) DataSet in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param options dump options
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file, const std::string& path, const T& data, const DumpOptions& options);
///
/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param idx the indices to which to write
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file,
                    const std::string& path,
                    const T& data,
                    const std::vector<size_t>& idx);
///
/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
///
/// \param file open File (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param idx the indices to which to write
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file,
                    const std::string& path,
                    const T& data,
                    const std::initializer_list<size_t>& idx);
///
/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param idx the indices to which to write
/// \param options dump options
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file,
                    const std::string& path,
                    const T& data,
                    const std::vector<size_t>& idx,
                    const DumpOptions& options);
///
/// \brief Write a scalar to a (new, extendible) DataSet in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param data the data to write (any supported type)
/// \param idx the indices to which to write
/// \param options dump options
///
/// \return The newly created DataSet
///
template <class T>
inline DataSet dump(File& file,
                    const std::string& path,
                    const T& data,
                    const std::initializer_list<size_t>& idx,
                    const DumpOptions& options);
///
/// \brief Load entry ``{i, j, ...}`` from a DataSet in an open HDF5 file to a scalar.
///
/// \param file opened file (has to be readable)
/// \param path path of the DataSet
/// \param idx the indices to load
///
/// \return The read data
///
template <class T>
inline T load(const File& file, const std::string& path, const std::vector<size_t>& idx);
///
/// \brief Load a DataSet in an open HDF5 file to an object (templated).
///
/// \param file opened file (has to be readable)
/// \param path path of the DataSet
///
/// \return The read data
///
template <class T>
inline T load(const File& file, const std::string& path);
///
/// \brief Write object (templated) to a (new) Attribute in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param key name of the attribute
/// \param data the data to write (any supported type)
/// \param mode write mode
///
/// \return The newly created Attribute
///
template <class T>
inline Attribute dumpAttribute(File& file,
                               const std::string& path,
                               const std::string& key,
                               const T& data,
                               DumpMode mode = DumpMode::Create);
///
/// \brief Write object (templated) to a (new) Attribute in an open HDF5 file.
///
/// \param file opened file (has to be writeable)
/// \param path path of the DataSet
/// \param key name of the attribute
/// \param data the data to write (any supported type)
/// \param options dump options
///
/// \return The newly created Attribute
///
template <class T>
inline Attribute dumpAttribute(File& file,
                               const std::string& path,
                               const std::string& key,
                               const T& data,
                               const DumpOptions& options);
///
/// \brief Load an Attribute in an open HDF5 file to an object (templated).
///
/// \param file opened file (has to be readable)
/// \param path path of the DataSet
/// \param key name of the attribute
///
/// \return The read data
///
template <class T>
inline T loadAttribute(const File& file, const std::string& path, const std::string& key);
} // namespace H5Easy
#include "h5easy_bits/H5Easy_Eigen.hpp"
#include "h5easy_bits/H5Easy_misc.hpp"
#include "h5easy_bits/H5Easy_public.hpp"
#include "h5easy_bits/H5Easy_scalar.hpp"

View File

@ -0,0 +1,167 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <memory>
#include <stdexcept>
#include <string>
#include <H5Ipublic.h>
namespace HighFive {
///
/// \brief Basic HighFive Exception class
///
/// Base of all HighFive exceptions; optionally chains HDF5 library errors.
///
class Exception: public std::exception {
  public:
    explicit Exception(const std::string& err_msg)
        : _errmsg(err_msg) {}
    Exception(const Exception& other) = default;
    Exception(Exception&& other) noexcept = default;
    Exception& operator=(const Exception& other) = default;
    Exception& operator=(Exception&& other) noexcept = default;
    ~Exception() noexcept override {}
    ///
    /// \brief get the current exception error message
    /// \return the error message as a C string
    ///
    inline const char* what() const noexcept override {
        return _errmsg.c_str();
    }
    ///
    /// \brief define the error message
    /// \param errmsg the new error message
    ///
    inline virtual void setErrorMsg(const std::string& errmsg) {
        _errmsg = errmsg;
    }
    ///
    /// \brief nextException
    /// \return pointer to the next exception in the chain, or nullptr if none
    ///
    inline Exception* nextException() const {
        return _next.get();
    }
    ///
    /// \brief HDF5 library error mapper
    /// \return HDF5 major error number
    ///
    inline hid_t getErrMajor() const {
        return _err_major;
    }
    ///
    /// \brief HDF5 library error mapper
    /// \return HDF5 minor error number
    ///
    inline hid_t getErrMinor() const {
        return _err_minor;
    }
  private:
    std::string _errmsg;
    // Next exception in the HDF5 error-stack chain (filled by HDF5ErrMapper).
    std::shared_ptr<Exception> _next = nullptr;
    // HDF5 major/minor error numbers (filled by HDF5ErrMapper).
    hid_t _err_major = 0, _err_minor = 0;
    friend struct HDF5ErrMapper;
};
// The following subclasses only tag the failing interface; all behavior
// (message storage, HDF5 error chaining) lives in the Exception base class.
///
/// \brief Exception specific to HighFive Object interface
///
class ObjectException: public Exception {
  public:
    explicit ObjectException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive DataType interface
///
class DataTypeException: public Exception {
  public:
    explicit DataTypeException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive File interface
///
class FileException: public Exception {
  public:
    explicit FileException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive DataSpace interface
///
class DataSpaceException: public Exception {
  public:
    explicit DataSpaceException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive Attribute interface
///
class AttributeException: public Exception {
  public:
    explicit AttributeException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive DataSet interface
///
class DataSetException: public Exception {
  public:
    explicit DataSetException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive Group interface
///
class GroupException: public Exception {
  public:
    explicit GroupException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive Property interface
///
class PropertyException: public Exception {
  public:
    explicit PropertyException(const std::string& err_msg)
        : Exception(err_msg) {}
};
///
/// \brief Exception specific to HighFive Reference interface
///
class ReferenceException: public Exception {
  public:
    explicit ReferenceException(const std::string& err_msg)
        : Exception(err_msg) {}
};
} // namespace HighFive
#include "bits/H5Exception_misc.hpp"

203
include/highfive/H5File.hpp Normal file
View File

@ -0,0 +1,203 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <string>
#include <type_traits>
#include "H5Object.hpp"
#include "H5PropertyList.hpp"
#include "bits/H5Annotate_traits.hpp"
#include "bits/H5Node_traits.hpp"
namespace HighFive {
///
/// \brief File class
///
class File: public Object, public NodeTraits<File>, public AnnotateTraits<File> {
  public:
    const static ObjectType type = ObjectType::File;
    enum class AccessMode {
        None = 0x00u,
        /// Open flag: Read only access
        ReadOnly = 0x01u,
        /// Open flag: Read Write access
        ReadWrite = 0x02u,
        /// Open flag: Truncate a file if already existing
        Truncate = 0x04u,
        /// Open flag: Open will fail if file already exists
        Excl = 0x08u,
        /// Open flag: Open in debug mode
        Debug = 0x10u,
        /// Open flag: Create non existing file
        Create = 0x20u,
        /// Derived open flag: common write mode (=ReadWrite|Create|Truncate)
        /// NOTE(review): the value is an alias of Truncate only — confirm the
        /// constructor expands it to ReadWrite|Create|Truncate.
        Overwrite = Truncate,
        /// Derived open flag: Opens RW or exclusively creates
        OpenOrCreate = ReadWrite | Create
    };
    constexpr static AccessMode ReadOnly = AccessMode::ReadOnly;
    constexpr static AccessMode ReadWrite = AccessMode::ReadWrite;
    constexpr static AccessMode Truncate = AccessMode::Truncate;
    constexpr static AccessMode Excl = AccessMode::Excl;
    constexpr static AccessMode Debug = AccessMode::Debug;
    constexpr static AccessMode Create = AccessMode::Create;
    constexpr static AccessMode Overwrite = AccessMode::Overwrite;
    constexpr static AccessMode OpenOrCreate = AccessMode::OpenOrCreate;
    ///
    /// \brief File
    /// \param filename: filepath of the HDF5 file
    /// \param openFlags: Open mode / flags ( ReadOnly, ReadWrite)
    /// \param fileAccessProps: the file access properties
    ///
    /// Open or create a new HDF5 file
    explicit File(const std::string& filename,
                  AccessMode openFlags = ReadOnly,
                  const FileAccessProps& fileAccessProps = FileAccessProps::Default());
    ///
    /// \brief File
    /// \param filename: filepath of the HDF5 file
    /// \param access_mode: Open mode / flags ( ReadOnly, ReadWrite, etc.)
    /// \param fileCreateProps: the file create properties
    /// \param fileAccessProps: the file access properties
    ///
    /// Open or create a new HDF5 file
    File(const std::string& filename,
         AccessMode access_mode,
         const FileCreateProps& fileCreateProps,
         const FileAccessProps& fileAccessProps = FileAccessProps::Default());
    /// \brief Keeps reference count constant, and invalidates other.
    File(File&& other) noexcept = default;
    /// \brief Keeps reference count constant, and invalidates other.
    File& operator=(File&& other) = default;
    /// \brief Increments reference count, keeps other valid.
    File(const File& other) = default;
    /// \brief Increments reference count, keeps other valid.
    File& operator=(const File& other) = default;
    ///
    /// \brief Return the name of the file
    ///
    const std::string& getName() const;
    /// \brief Object path of a File is always "/"
    std::string getPath() const noexcept {
        return "/";
    }
    /// \brief Returns the block size for metadata in bytes
    hsize_t getMetadataBlockSize() const;
    /// \brief Returns the HDF5 version compatibility bounds
    std::pair<H5F_libver_t, H5F_libver_t> getVersionBounds() const;
#if H5_VERSION_GE(1, 10, 1)
    /// \brief Returns the HDF5 file space strategy.
    H5F_fspace_strategy_t getFileSpaceStrategy() const;
    /// \brief Returns the page size, if paged allocation is used.
    hsize_t getFileSpacePageSize() const;
#endif
    ///
    /// \brief flush
    ///
    /// Flushes all buffers associated with a file to disk
    ///
    void flush();
    /// \brief Get the list of properties for creation of this file
    FileCreateProps getCreatePropertyList() const {
        return details::get_plist<FileCreateProps>(*this, H5Fget_create_plist);
    }
    /// \brief Get the list of properties for accessing this file
    FileAccessProps getAccessPropertyList() const {
        return details::get_plist<FileAccessProps>(*this, H5Fget_access_plist);
    }
    /// \brief Get the size of this file in bytes
    size_t getFileSize() const;
    /// \brief Get the amount of tracked, unused space in bytes.
    ///
    /// Note, this is a wrapper for `H5Fget_freespace` and returns the number
    /// of bytes in the free space manager. This might be different from the
    /// total amount of unused space in the HDF5 file, since the free space
    /// manager might not track everything or not track across open-close
    /// cycles.
    size_t getFreeSpace() const;
  protected:
    File() = default;
    using Object::Object;
  private:
    // Cached lazily by getName(); mutable so the const getter can fill it.
    mutable std::string _filename{};
    template <typename>
    friend class PathTraits;
};
/// \brief Bitwise OR of two access-mode flag sets.
inline File::AccessMode operator|(File::AccessMode lhs, File::AccessMode rhs) {
    using int_t = std::underlying_type<File::AccessMode>::type;
    const auto bits = static_cast<int_t>(lhs) | static_cast<int_t>(rhs);
    return static_cast<File::AccessMode>(bits);
}
/// \brief Bitwise AND of two access-mode flag sets.
inline File::AccessMode operator&(File::AccessMode lhs, File::AccessMode rhs) {
    using int_t = std::underlying_type<File::AccessMode>::type;
    const auto bits = static_cast<int_t>(lhs) & static_cast<int_t>(rhs);
    return static_cast<File::AccessMode>(bits);
}
/// \brief Bitwise XOR of two access-mode flag sets.
inline File::AccessMode operator^(File::AccessMode lhs, File::AccessMode rhs) {
    using int_t = std::underlying_type<File::AccessMode>::type;
    const auto bits = static_cast<int_t>(lhs) ^ static_cast<int_t>(rhs);
    return static_cast<File::AccessMode>(bits);
}
/// \brief Bitwise complement of an access-mode flag set.
inline File::AccessMode operator~(File::AccessMode mode) {
    using int_t = std::underlying_type<File::AccessMode>::type;
    const auto bits = ~static_cast<int_t>(mode);
    return static_cast<File::AccessMode>(bits);
}
/// \brief In-place bitwise OR; returns a mutable reference to `lhs`,
/// following the standard compound-assignment convention (the original
/// returned `const&`, inconsistent with its `&=`/`^=` siblings).
inline File::AccessMode& operator|=(File::AccessMode& lhs, File::AccessMode rhs) {
    lhs = lhs | rhs;
    return lhs;
}
/// \brief In-place bitwise AND; returns a mutable reference to `lhs`,
/// following the standard compound-assignment convention (the original
/// returned a value copy).
inline File::AccessMode& operator&=(File::AccessMode& lhs, File::AccessMode rhs) {
    lhs = lhs & rhs;
    return lhs;
}
/// \brief In-place bitwise XOR; returns a mutable reference to `lhs`,
/// following the standard compound-assignment convention (the original
/// returned a value copy).
inline File::AccessMode& operator^=(File::AccessMode& lhs, File::AccessMode rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}
/// \brief True when at least one access-mode flag is set in `mode`.
inline bool any(File::AccessMode mode) {
    return !(mode == File::AccessMode::None);
}
} // namespace HighFive
// H5File is the main user constructible -> bring in implementation headers
#include "bits/H5Annotate_traits_misc.hpp"
#include "bits/H5File_misc.hpp"
#include "bits/H5Node_traits_misc.hpp"
#include "bits/H5Path_traits_misc.hpp"

View File

@ -0,0 +1,86 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <H5Gpublic.h>
#include "H5Object.hpp"
#include "bits/H5Friends.hpp"
#include "bits/H5_definitions.hpp"
#include "bits/H5Annotate_traits.hpp"
#include "bits/H5Node_traits.hpp"
#include "bits/H5Path_traits.hpp"
namespace HighFive {
namespace detail {
/// \brief Internal hack to create a `Group` from an ID.
///
/// WARNING: Creating a Group from an ID has implications w.r.t. the lifetime of the object
/// that got passed via its ID. Using this method carelessly opens up the suite of issues
/// related to C-style resource management, including the analog of double free, dangling
/// pointers, etc.
///
/// NOTE: This is not part of the API and only serves to work around a compiler issue in GCC which
/// prevents us from using `friend`s instead. This function should only be used for internal
/// purposes. The problematic construct is:
///
///     template<class Derived>
///     friend class SomeCRTP<Derived>;
///
/// \private
Group make_group(hid_t);
}  // namespace detail
///
/// \brief Represents an hdf5 group
class Group: public Object,
             public NodeTraits<Group>,
             public AnnotateTraits<Group>,
             public PathTraits<Group> {
  public:
    const static ObjectType type = ObjectType::Group;
    Group() = default;
    /// \brief Estimated number of links and estimated length of link names,
    /// as read from this group's creation property list.
    std::pair<unsigned int, unsigned int> getEstimatedLinkInfo() const;
    /// \brief Get the list of properties for creation of this group
    GroupCreateProps getCreatePropertyList() const {
        return details::get_plist<GroupCreateProps>(*this, H5Gget_create_plist);
    }
    /// \brief Take over ownership of a generic Object known to be a group.
    explicit Group(Object&& o) noexcept
        : Object(std::move(o)) {}  // dropped stray ';' after the ctor body (-Wextra-semi)
  protected:
    using Object::Object;
    friend Group detail::make_group(hid_t);
    friend class File;
    friend class Reference;
#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
    template <typename Derivate>
    friend class ::HighFive::NodeTraits;
#endif
};
inline std::pair<unsigned int, unsigned int> Group::getEstimatedLinkInfo() const {
auto gcpl = getCreatePropertyList();
auto eli = EstimatedLinkInfo(gcpl);
return std::make_pair(eli.getEntries(), eli.getNameLength());
}
namespace detail {
// \private Wraps a raw hid into a Group via the protected Object(hid_t) ctor.
inline Group make_group(hid_t hid) {
    return Group(hid);
}
} // namespace detail
} // namespace HighFive

View File

@ -0,0 +1,139 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <ctime>
#include "bits/H5_definitions.hpp"
#include "bits/H5Friends.hpp"
#include "H5Exception.hpp"
#include "bits/h5o_wrapper.hpp"
#include "bits/h5i_wrapper.hpp"
namespace HighFive {
///
/// \brief Enum of the types of objects (H5O api)
///
enum class ObjectType {
    File,
    Group,
    UserDataType,
    DataSpace,
    Dataset,
    Attribute,
    Other  // Internal/custom object type
};
class Object {
  public:
    // move constructor, reuse hid
    Object(Object&& other) noexcept;
    ///
    /// \brief isValid
    /// \return true if current Object is a valid HDF5Object
    ///
    bool isValid() const noexcept;
    ///
    /// \brief getId
    /// \return internal HDF5 id to the object
    /// provided for C API compatibility
    ///
    hid_t getId() const noexcept;
    ///
    /// \brief Retrieve various info (address, dates, etc.) about the current object
    ///
    ObjectInfo getInfo() const;
    ///
    /// \brief Address of an HDF5 object in the file.
    ///
    /// Not all HDF5 files support addresses anymore. The more recent concept
    /// is a VOL token.
    ///
    /// \since 3.0.0
    ///
    haddr_t getAddress() const;
    ///
    /// \brief Gets the fundamental type of the object (dataset, group, etc)
    /// \exception ObjectException when the _hid is negative or the type
    /// is custom and not registered yet
    ///
    ObjectType getType() const;
    // Check if refer to same object (same underlying HDF5 id)
    bool operator==(const Object& other) const noexcept {
        return _hid == other._hid;
    }
  protected:
    // empty constructor
    Object();
    // copy constructor, increase reference counter
    Object(const Object& other);
    // Init with a low-level object id
    explicit Object(hid_t) noexcept;
    // decrease reference counter
    ~Object();
    // Copy-Assignment operator
    Object& operator=(const Object& other);
    Object& operator=(Object&& other);
    // Raw HDF5 identifier owned by this wrapper.
    hid_t _hid;
  private:
    friend class Reference;
    friend class CompoundType;
#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
    template <typename Derivate>
    friend class NodeTraits;
    template <typename Derivate>
    friend class AnnotateTraits;
    template <typename Derivate>
    friend class PathTraits;
#endif
};
///
/// \brief A class for accessing hdf5 objects info
///
class ObjectInfo {
  public:
    ObjectInfo(const Object& obj);
    /// \brief Retrieve the number of references to this object
    size_t getRefCount() const noexcept;
    /// \brief Retrieve the object's creation time
    time_t getCreationTime() const noexcept;
    /// \brief Retrieve the object's last modification time
    time_t getModificationTime() const noexcept;
  private:
    // Raw HDF5 object info snapshot filled at construction.
    detail::h5o_info1_t raw_info;
    friend class Object;
};
} // namespace HighFive
#include "bits/H5Object_misc.hpp"

View File

@ -0,0 +1,710 @@
/*
* Copyright (c), 2017-2018, Adrien Devresse <adrien.devresse@epfl.ch>
* Juan Hernando <juan.hernando@epfl.ch>
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <vector>
#include <H5Ppublic.h>
// Required by MPIOFileAccess
#ifdef H5_HAVE_PARALLEL
#include <H5FDmpi.h>
#endif
#include "H5Exception.hpp"
#include "H5Object.hpp"
namespace HighFive {
/// \defgroup PropertyLists Property Lists
/// HDF5 is configured through what they call property lists. In HDF5 the
/// process has four steps:
///
/// 1. Create a property list. As users we now have an `hid_t` identifying the
/// property list.
/// 2. Set properties as desired.
/// 3. Pass the HID to the HDF5 function to be configured.
/// 4. Free the property list.
///
/// Note that the mental picture is that one creates a settings object, and
/// then passes those settings to a function such as `H5Dwrite`. In and of
/// themselves the settings don't change the behaviour of HDF5. Rather they
/// need to be applied in order to take effect.
///
/// The second aspect is that property lists represent any number of related
/// settings, e.g. there's property lists anything related to creating files
/// and another for accessing files, same for creating and accessing datasets,
/// etc. Settings that affect creating files, must be passed a file creation
/// property list, while settings that affect file access require a file access
/// property list.
///
/// In HighFive the `PropertyList` works similarly in that it's an object
/// representing the settings, i.e. internally it's just the property lists
/// HID. Just like in HDF5 one adds the settings to the settings object; and
/// then passes the settings object to the respective method. Example:
///
///
/// // Create an object which contains the setting to
/// // open files with MPI-IO.
/// auto fapl = FileAccessProps();
/// fapl.add(MPIOFileAccess(MPI_COMM_WORLD, MPI_INFO_NULL);
///
/// // To open a specific file with MPI-IO, we do:
/// auto file = File("foo.h5", File::ReadOnly, fapl);
///
/// Note that the `MPIOFileAccess` object by itself doesn't affect the
/// `FileAccessProps`. Rather it needs to be explicitly added to the `fapl`
/// (the group of file access related settings), and then the `fapl` needs to
/// be passed to the constructor of `File` for the settings to take effect.
///
/// This is important to understand when reading properties. Example:
///
/// // Obtain the file access property list:
/// auto fapl = file.getAccessPropertyList()
///
/// // Extracts a copy of the collective MPI-IO metadata settings from
/// // the group of file access related setting, i.e. the `fapl`:
/// auto mpio_metadata = MPIOCollectiveMetadata(fapl);
///
/// if(mpio_metadata.isCollectiveRead()) {
/// // something specific if meta data is read collectively.
/// }
///
/// // Careful, this only affects the `mpio_metadata` object, but not the
/// // `fapl`, and also not whether `file` uses collective MPI-IO for
/// // metadata.
/// mpio_metadata = MPIOCollectiveMetadata(false, false);
///
/// @{
///
/// \brief Types of property lists
///
// One enumerator per property-list class wrapped by PropertyList<T>;
// see the using-aliases (FileCreateProps, FileAccessProps, ...) below.
enum class PropertyType : int {
FILE_CREATE,
FILE_ACCESS,
DATASET_CREATE,
DATASET_ACCESS,
DATASET_XFER,
GROUP_CREATE,
GROUP_ACCESS,
DATATYPE_CREATE,
DATATYPE_ACCESS,
STRING_CREATE,
ATTRIBUTE_CREATE,
LINK_CREATE,
LINK_ACCESS,
};
namespace details {
template <typename T, typename U>
T get_plist(const U& obj, hid_t (*f)(hid_t)) {
auto hid = f(obj.getId());
if (hid < 0) {
HDF5ErrMapper::ToException<PropertyException>("Unable to get property list");
}
T t{};
t._hid = hid;
return t;
}
} // namespace details
///
/// \brief Base Class for Property lists, providing global default
class PropertyListBase: public Object {
public:
PropertyListBase() noexcept;
/// \brief Process-wide default property-list instance, shared by all
/// PropertyList<T>::Default() overloads.
static const PropertyListBase& Default() noexcept {
static const PropertyListBase plist{};
return plist;
}
private:
// details::get_plist() needs to write the raw HID into a freshly
// default-constructed list.
template <typename T, typename U>
friend T details::get_plist(const U&, hid_t (*f)(hid_t));
};
///
/// \brief HDF5 property Lists
///
template <PropertyType T>
class PropertyList: public PropertyListBase {
public:
///
/// \brief return the type of this PropertyList
constexpr PropertyType getType() const noexcept {
return T;
}
///
/// Add a property to this property list.
/// A property is an object which is expected to have a method with the
/// following signature void apply(hid_t hid) const
template <typename P>
void add(const P& property);
///
/// Return the Default property type object
static const PropertyList<T>& Default() noexcept {
// The shared PropertyListBase singleton is reused for every
// PropertyType; only the static type of the reference differs.
return static_cast<const PropertyList<T>&>(PropertyListBase::Default());
}
/// Return a property list created via a call to `H5Pcreate`.
///
/// An empty property is needed when one wants `getId()` to immediately
/// point at a valid HID. This is important when interfacing directly with
/// HDF5 to set properties that haven't been wrapped by HighFive.
static PropertyList<T> Empty() {
auto plist = PropertyList<T>();
plist._initializeIfNeeded();
return plist;
}
protected:
// Create the underlying HDF5 property list if this object does not yet
// hold a valid HID (see Empty()).
void _initializeIfNeeded();
};
// Convenience aliases: one concrete property-list type per HDF5
// property-list class enumerated in PropertyType.
using FileCreateProps = PropertyList<PropertyType::FILE_CREATE>;
using FileAccessProps = PropertyList<PropertyType::FILE_ACCESS>;
using DataSetCreateProps = PropertyList<PropertyType::DATASET_CREATE>;
using DataSetAccessProps = PropertyList<PropertyType::DATASET_ACCESS>;
using DataTransferProps = PropertyList<PropertyType::DATASET_XFER>;
using GroupCreateProps = PropertyList<PropertyType::GROUP_CREATE>;
using GroupAccessProps = PropertyList<PropertyType::GROUP_ACCESS>;
using DataTypeCreateProps = PropertyList<PropertyType::DATATYPE_CREATE>;
using DataTypeAccessProps = PropertyList<PropertyType::DATATYPE_ACCESS>;
using StringCreateProps = PropertyList<PropertyType::STRING_CREATE>;
using AttributeCreateProps = PropertyList<PropertyType::ATTRIBUTE_CREATE>;
using LinkCreateProps = PropertyList<PropertyType::LINK_CREATE>;
using LinkAccessProps = PropertyList<PropertyType::LINK_ACCESS>;
///
/// RawPropertyLists are to be used when advanced H5 properties
/// are desired and are not part of the HighFive API.
/// Therefore this class is mainly for internal use.
template <PropertyType T>
class RawPropertyList: public PropertyList<T> {
public:
// Escape hatch for raw H5P* calls. Presumably invokes `funct` with this
// list's HID followed by `args...` — confirm in bits/H5PropertyList_misc.hpp.
template <typename F, typename... Args>
void add(const F& funct, const Args&... args);
};
#ifdef H5_HAVE_PARALLEL
///
/// \brief Configure MPI access for the file
///
/// All further modifications to the structure of the file will have to be
/// done with collective operations
///
class MPIOFileAccess {
public:
/// \brief Request MPI-IO file access with the given communicator and info object.
MPIOFileAccess(MPI_Comm comm, MPI_Info info);
private:
friend FileAccessProps;
// Applied to the raw property list by PropertyList::add().
void apply(hid_t list) const;
MPI_Comm _comm;
MPI_Info _info;
};
#if H5_VERSION_GE(1, 10, 0)
///
/// \brief Use collective MPI-IO for metadata read and write.
///
/// See `MPIOCollectiveMetadataRead` and `MPIOCollectiveMetadataWrite`.
///
class MPIOCollectiveMetadata {
public:
/// \brief Request (or disable) collective metadata I/O for both reads and writes.
explicit MPIOCollectiveMetadata(bool collective = true);
/// \brief Extract the current collective-metadata settings from `plist`.
explicit MPIOCollectiveMetadata(const FileAccessProps& plist);
bool isCollectiveRead() const;
bool isCollectiveWrite() const;
private:
friend FileAccessProps;
void apply(hid_t plist) const;
// Read and write collectivity are tracked separately; see
// MPIOCollectiveMetadataRead / MPIOCollectiveMetadataWrite for the
// individual knobs.
bool collective_read_;
bool collective_write_;
};
///
/// \brief Use collective MPI-IO for metadata read?
///
/// Note that when used in a file access property list, this will force all reads
/// of meta data to be collective. HDF5 functions may implicitly perform metadata
/// reads. These functions would become collective. A list of functions that
/// perform metadata reads can be found in the HDF5 documentation, e.g.
/// https://docs.hdfgroup.org/hdf5/v1_12/group___g_a_c_p_l.html
///
/// In HighFive setting collective read is (currently) only supported on file level.
///
/// Please also consult upstream documentation of `H5Pset_all_coll_metadata_ops`.
///
class MPIOCollectiveMetadataRead {
public:
/// \brief Request (or disable) collective metadata reads.
explicit MPIOCollectiveMetadataRead(bool collective = true);
/// \brief Extract the current setting from `plist`.
explicit MPIOCollectiveMetadataRead(const FileAccessProps& plist);
bool isCollective() const;
private:
friend FileAccessProps;
friend MPIOCollectiveMetadata;
void apply(hid_t plist) const;
bool collective_;
};
///
/// \brief Use collective MPI-IO for metadata write?
///
/// In order to keep the in-memory representation of the file structure
/// consistent across MPI ranks, writing meta data is always a collective
/// operation. Meaning all MPI ranks must participate. Passing this setting
/// enables using MPI-IO collective operations for metadata writes.
///
/// Please also consult upstream documentation of `H5Pset_coll_metadata_write`.
///
class MPIOCollectiveMetadataWrite {
public:
/// \brief Request (or disable) collective MPI-IO metadata writes.
explicit MPIOCollectiveMetadataWrite(bool collective = true);
/// \brief Extract the current setting from `plist`.
explicit MPIOCollectiveMetadataWrite(const FileAccessProps& plist);
bool isCollective() const;
private:
friend FileAccessProps;
friend MPIOCollectiveMetadata;
void apply(hid_t plist) const;
bool collective_;
};
#endif
#endif
///
/// \brief Configure the version bounds for the file
///
/// Used to define the compatibility of objects created within HDF5 files,
/// and affects the format of groups stored in the file.
///
/// See also the documentation of \c H5P_SET_LIBVER_BOUNDS in HDF5.
///
/// Possible values for \c low and \c high are:
/// * \c H5F_LIBVER_EARLIEST
/// * \c H5F_LIBVER_V18
/// * \c H5F_LIBVER_V110
/// * \c H5F_LIBVER_NBOUNDS
/// * \c H5F_LIBVER_LATEST currently defined as \c H5F_LIBVER_V110 within
/// HDF5
///
class FileVersionBounds {
public:
/// \brief Set the lower/upper bound on the library versions the file may use.
FileVersionBounds(H5F_libver_t low, H5F_libver_t high);
/// \brief Extract the currently configured bounds from `fapl`.
explicit FileVersionBounds(const FileAccessProps& fapl);
/// \brief The configured (low, high) bound pair.
std::pair<H5F_libver_t, H5F_libver_t> getVersion() const;
private:
friend FileAccessProps;
void apply(hid_t list) const;
H5F_libver_t _low;
H5F_libver_t _high;
};
///
/// \brief Configure the metadata block size to use writing to files
///
/// \param size Metadata block size in bytes
///
class MetadataBlockSize {
public:
/// \brief Set the metadata block size, in bytes.
explicit MetadataBlockSize(hsize_t size);
/// \brief Extract the currently configured block size from `fapl`.
explicit MetadataBlockSize(const FileAccessProps& fapl);
hsize_t getSize() const;
private:
friend FileAccessProps;
void apply(hid_t list) const;
hsize_t _size;
};
#if H5_VERSION_GE(1, 10, 1)
///
/// \brief Configure the file space strategy.
///
/// See the upstream documentation of `H5Pget_file_space_strategy` for more details. Essentially,
/// it enables configuring how space is allocated in the file.
///
class FileSpaceStrategy {
public:
///
/// \brief Create a file space strategy property.
///
/// \param strategy The HDF5 free space strategy.
/// \param persist Should free space managers be persisted across file closing and reopening.
/// \param threshold The free-space manager won't track sections smaller than this threshold.
FileSpaceStrategy(H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold);
/// \brief Extract the strategy currently configured on `fcpl`.
explicit FileSpaceStrategy(const FileCreateProps& fcpl);
H5F_fspace_strategy_t getStrategy() const;
hbool_t getPersist() const;
hsize_t getThreshold() const;
private:
friend FileCreateProps;
void apply(hid_t list) const;
H5F_fspace_strategy_t _strategy;
hbool_t _persist;
hsize_t _threshold;
};
///
/// \brief Configure the page size for paged allocation.
///
/// See the upstream documentation of `H5Pset_file_space_page_size` for more details. Essentially,
/// it enables configuring the page size when paged allocation is used.
///
/// General information about paged allocation can be found in the upstream documentation "RFC: Page
/// Buffering".
///
class FileSpacePageSize {
public:
///
/// \brief Create a file space page-size property.
///
/// \param page_size The page size in bytes.
explicit FileSpacePageSize(hsize_t page_size);
/// \brief Extract the page size currently configured on `fcpl`.
explicit FileSpacePageSize(const FileCreateProps& fcpl);
hsize_t getPageSize() const;
private:
friend FileCreateProps;
void apply(hid_t list) const;
hsize_t _page_size;
};
#ifndef H5_HAVE_PARALLEL
/// \brief Set size of the page buffer.
///
/// Please, consult the upstream documentation of
/// H5Pset_page_buffer_size
/// H5Pget_page_buffer_size
/// Note that this setting is only valid for page allocated/aggregated
/// files, i.e. those that have file space strategy "Page".
///
/// Tests suggest this doesn't work in the parallel version of the
/// library. Hence, this isn't available at compile time if the parallel
/// library was selected.
class PageBufferSize {
public:
/// Property to set page buffer sizes.
///
/// @param page_buffer_size maximum size of the page buffer in bytes.
/// @param min_meta_percent fraction of the page buffer dedicated to meta data, in percent.
/// @param min_raw_percent fraction of the page buffer dedicated to raw data, in percent.
explicit PageBufferSize(size_t page_buffer_size,
unsigned min_meta_percent = 0,
unsigned min_raw_percent = 0);
/// \brief Extract the current page-buffer settings from `fapl`.
explicit PageBufferSize(const FileAccessProps& fapl);
size_t getPageBufferSize() const;
unsigned getMinMetaPercent() const;
unsigned getMinRawPercent() const;
private:
friend FileAccessProps;
void apply(hid_t list) const;
size_t _page_buffer_size;
unsigned _min_meta;
unsigned _min_raw;
};
#endif
#endif
/// \brief Set hints as to how many links to expect and their average length
///
class EstimatedLinkInfo {
public:
/// \brief Create a property with the request parameters.
///
/// @param entries The estimated number of links in a group.
/// @param length The estimated length of the names of links.
explicit EstimatedLinkInfo(unsigned entries, unsigned length);
/// \brief Extract the current estimates from `gcpl`.
explicit EstimatedLinkInfo(const GroupCreateProps& gcpl);
/// \brief The estimated number of links in a group.
unsigned getEntries() const;
/// \brief The estimated length of the names of links.
unsigned getNameLength() const;
private:
friend GroupCreateProps;
void apply(hid_t hid) const;
unsigned _entries;
unsigned _length;
};
// Dataset-creation property: set (or read back) the chunk dimensions.
class Chunking {
public:
/// \brief Set chunk dimensions explicitly.
explicit Chunking(const std::vector<hsize_t>& dims);
Chunking(const std::initializer_list<hsize_t>& items);
// Variadic convenience: Chunking(2, 4, 8) == Chunking({2, 4, 8}).
template <typename... Args>
explicit Chunking(hsize_t item, Args... args);
/// \brief Read the chunk dimensions configured on `plist`.
// NOTE(review): `max_dims` presumably caps the rank queried from HDF5 —
// confirm against the definition in bits/H5PropertyList_misc.hpp.
explicit Chunking(DataSetCreateProps& plist, size_t max_dims = 32);
const std::vector<hsize_t>& getDimensions() const;
private:
friend DataSetCreateProps;
void apply(hid_t hid) const;
std::vector<hsize_t> _dims;
};
// Enable deflate (gzip) compression at the given level.
class Deflate {
public:
explicit Deflate(unsigned level);
private:
friend DataSetCreateProps;
friend GroupCreateProps;
void apply(hid_t hid) const;
const unsigned _level;  // compression level handed to HDF5
};
// Enable SZIP compression with the given options mask / pixels per block.
class Szip {
public:
explicit Szip(unsigned options_mask = H5_SZIP_EC_OPTION_MASK,
unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK);
unsigned getOptionsMask() const;
unsigned getPixelsPerBlock() const;
private:
friend DataSetCreateProps;
void apply(hid_t hid) const;
const unsigned _options_mask;
const unsigned _pixels_per_block;
};
// Stateless dataset-creation property enabling the HDF5 shuffle filter.
class Shuffle {
public:
Shuffle() = default;
private:
friend DataSetCreateProps;
void apply(hid_t hid) const;
};
/// \brief When are datasets allocated?
///
/// The precise time of when HDF5 requests space to store the dataset
/// can be configured. Please, consider the upstream documentation for
/// `H5Pset_alloc_time`.
class AllocationTime {
public:
/// \brief Set the allocation time explicitly.
explicit AllocationTime(H5D_alloc_time_t alloc_time);
/// \brief Extract the allocation time configured on `dcpl`.
explicit AllocationTime(const DataSetCreateProps& dcpl);
// NOTE(review): getter is not const, unlike the sibling properties in this
// header — likely an oversight; fixing it requires touching the matching
// definition in bits/H5PropertyList_misc.hpp.
H5D_alloc_time_t getAllocationTime();
private:
friend DataSetCreateProps;
void apply(hid_t dcpl) const;
H5D_alloc_time_t _alloc_time;
};
/// Dataset access property to control chunk cache configuration.
/// Do not confuse with the similar file access property for H5Pset_cache
class Caching {
public:
/// https://support.hdfgroup.org/HDF5/doc/RM/H5P/H5Pset_chunk_cache.html for
/// details.
Caching(size_t numSlots,
size_t cacheSize,
double w0 = static_cast<double>(H5D_CHUNK_CACHE_W0_DEFAULT));
// NOTE(review): this ctor takes DataSetCreateProps, yet the friend below —
// and the chunk cache itself — is a dataset *access* property. Confirm
// whether DataSetAccessProps was intended (cannot be changed here without
// the matching definition, which is out of view).
explicit Caching(const DataSetCreateProps& dcpl);
size_t getNumSlots() const;
size_t getCacheSize() const;
double getW0() const;
private:
friend DataSetAccessProps;
void apply(hid_t hid) const;
size_t _numSlots;
size_t _cacheSize;
double _w0;
};
// Link-creation property: create missing intermediate groups automatically.
class CreateIntermediateGroup {
public:
explicit CreateIntermediateGroup(bool create = true);
/// \brief Extract the current setting from `lcpl`.
explicit CreateIntermediateGroup(const LinkCreateProps& lcpl);
bool isSet() const;
protected:
// Populate _create from an existing raw property-list HID.
void fromPropertyList(hid_t hid);
private:
friend LinkCreateProps;
void apply(hid_t hid) const;
bool _create;
};
#ifdef H5_HAVE_PARALLEL
// Data-transfer property: request collective MPI-IO data transfers.
class UseCollectiveIO {
public:
explicit UseCollectiveIO(bool enable = true);
/// \brief Extract the current setting from `dxpl`.
explicit UseCollectiveIO(const DataTransferProps& dxpl);
/// \brief Does the property request collective IO?
bool isCollective() const;
private:
friend DataTransferProps;
void apply(hid_t hid) const;
bool _enable;
};
/// \brief The cause for non-collective I/O.
///
/// The cause refers to the most recent I/O with data transfer property list `dxpl` at time of
/// creation of this object. This object will not update automatically for later data transfers,
/// i.e. `H5Pget_mpio_no_collective_cause` is called in the constructor, and not when fetching
/// a value, such as `wasCollective`.
// Read-only property: note there is no apply() — the causes are captured
// once, in the constructor, and never written back to a property list.
class MpioNoCollectiveCause {
public:
explicit MpioNoCollectiveCause(const DataTransferProps& dxpl);
/// \brief Was the datatransfer collective?
bool wasCollective() const;
/// \brief The local cause for a non-collective I/O.
uint32_t getLocalCause() const;
/// \brief The global cause for a non-collective I/O.
uint32_t getGlobalCause() const;
/// \brief A pair of the local and global cause for non-collective I/O.
std::pair<uint32_t, uint32_t> getCause() const;
private:
friend DataTransferProps;
uint32_t _local_cause;
uint32_t _global_cause;
};
#endif
struct CreationOrder {
// Bit flags; combine with '|' and pass to LinkCreationOrder.
enum _creation_order {
Tracked = H5P_CRT_ORDER_TRACKED,
Indexed = H5P_CRT_ORDER_INDEXED,
};
};
///
/// \brief Track and index creation order time
///
/// Let user retrieve objects by creation order time instead of name.
///
class LinkCreationOrder {
public:
///
/// \brief Create the property
/// \param flags Should be a composition of HighFive::CreationOrder.
///
explicit LinkCreationOrder(unsigned flags)
: _flags(flags) {}
/// \brief Extract the current flags from `fcpl`.
explicit LinkCreationOrder(const FileCreateProps& fcpl);
/// \brief Extract the current flags from `gcpl`.
explicit LinkCreationOrder(const GroupCreateProps& gcpl);
unsigned getFlags() const;
protected:
// Populate _flags from an existing raw property-list HID.
void fromPropertyList(hid_t hid);
private:
friend FileCreateProps;
friend GroupCreateProps;
void apply(hid_t hid) const;
unsigned _flags;
};
///
/// \brief Set threshold for attribute storage.
///
/// HDF5 can store Attributes in the object header (compact) or in the B-tree
/// (dense). This property sets the threshold when attributes are moved to one
/// or the other storage format.
///
/// Please refer to the upstream documentation of `H5Pset_attr_phase_change` or
/// Section 8 (Attributes) in the User Guide, in particular Subsection 8.5.
///
class AttributePhaseChange {
public:
///
/// \brief Create the property from the threshold values.
///
/// When the number of attributes hits `max_compact` the attributes are
/// moved to dense storage, once the number drops to below `min_dense` the
/// attributes are moved to compact storage.
AttributePhaseChange(unsigned max_compact, unsigned min_dense);
/// \brief Extract threshold values from property list.
explicit AttributePhaseChange(const GroupCreateProps& gcpl);
/// \brief Threshold above which attributes switch to dense storage.
unsigned max_compact() const;
/// \brief Threshold below which attributes move back to compact storage.
unsigned min_dense() const;
private:
friend GroupCreateProps;
void apply(hid_t hid) const;
unsigned _max_compact;
unsigned _min_dense;
};
/// @}
} // namespace HighFive
#include "bits/H5PropertyList_misc.hpp"

View File

@ -0,0 +1,81 @@
/*
* Copyright (c), 2020, EPFL - Blue Brain Project
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <string>
#include <vector>
#include <H5Ipublic.h>
#include <H5Rpublic.h>
#include "bits/H5_definitions.hpp"
namespace HighFive {
namespace details {
template <typename T>
struct inspector;
}
///
/// \brief An HDF5 (object) reference type
///
/// HDF5 object references allow pointing to groups, datasets (and compound types). They
/// differ from links in their ability to be stored and retrieved as data from the HDF5
/// file in datasets themselves.
///
class Reference {
public:
/// \brief Create an empty Reference to be initialized later
Reference() = default;
/// \brief Create a Reference to an object residing at a given location
///
/// \param location A File or Group where the object being referenced to resides
/// \param object A Dataset or Group to be referenced
Reference(const Object& location, const Object& object);
/// \brief Retrieve the Object being referenced by the Reference
///
/// \tparam T the appropriate HighFive Container (either DataSet or Group)
/// \param location the location where the referenced object is to be found (a File)
/// \return the dereferenced Object (either a Group or DataSet)
template <typename T>
T dereference(const Object& location) const;
/// \brief Get only the type of the referenced Object
///
/// \param location the location where the referenced object is to be found (a File)
/// \return the ObjectType of the referenced object
ObjectType getType(const Object& location) const;
protected:
/// \brief Create a Reference from a low-level HDF5 object reference
inline explicit Reference(const hobj_ref_t h5_ref)
: href(h5_ref) {};
/// \brief Create the low-level reference and store it at refptr
///
/// \param refptr Pointer to a memory location where the created HDF5 reference will
/// be stored
void create_ref(hobj_ref_t* refptr) const;
private:
// Resolve `href` against `location` to an Object handle.
Object get_ref(const Object& location) const;
hobj_ref_t href{};       // low-level HDF5 object-reference value
// obj_name/parent_id: presumably record where the reference was created so
// create_ref() can rebuild it lazily — confirm in bits/H5Reference_misc.hpp.
std::string obj_name{};
hid_t parent_id{};
// Serialization support needs access to the raw members above.
friend struct details::inspector<Reference>;
};
} // namespace HighFive
#include "bits/H5Reference_misc.hpp"

View File

@ -0,0 +1,68 @@
/*
* Copyright (c), 2017, Adrien Devresse <adrien.devresse@epfl.ch>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include "H5DataSet.hpp"
#include "H5DataSpace.hpp"
#include "bits/H5Slice_traits.hpp"
#include "bits/H5Friends.hpp"
namespace HighFive {
namespace detail {
// Factory granted friendship by Selection (its constructor is protected).
Selection make_selection(const DataSpace&, const DataSpace&, const DataSet&);
}
///
/// \brief Selection: represent a view on a slice/part of a dataset
///
/// A Selection is valid only if its parent dataset is valid
///
class Selection: public SliceTraits<Selection> {
public:
///
/// \brief getSpace
/// \return Dataspace associated with this selection
///
DataSpace getSpace() const;
///
/// \brief getMemSpace
/// \return Dataspace associated with the memory representation of this
/// selection
///
DataSpace getMemSpace() const;
///
/// \brief getDataSet
/// \return parent dataset of this selection
///
DataSet& getDataset();
const DataSet& getDataset() const;
///
/// \brief return the datatype of the selection
/// \return return the datatype of the selection
DataType getDataType() const;
protected:
// Only SliceTraits and detail::make_selection may construct a Selection.
Selection(const DataSpace& memspace, const DataSpace& file_space, const DataSet& set);
private:
DataSpace _mem_space, _file_space;  // memory-side and file-side dataspaces
DataSet _set;  // parent dataset; the selection is valid only while it is
#if HIGHFIVE_HAS_FRIEND_DECLARATIONS
template <typename Derivate>
friend class ::HighFive::SliceTraits;
#endif
friend Selection detail::make_selection(const DataSpace&, const DataSpace&, const DataSet&);
};
} // namespace HighFive

View File

@ -0,0 +1,218 @@
/*
* Copyright (c), 2017, Blue Brain Project - EPFL (CH)
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
*/
#pragma once
#include <functional>
#include <string>
#include <iostream>
#include "bits/h5e_wrapper.hpp"
#include "bits/H5Friends.hpp"
namespace HighFive {
///
/// \brief Utility class to disable HDF5 stack printing inside a scope.
///
class SilenceHDF5 {
public:
/// \brief Save the current HDF5 auto-error handler and, if `enable`,
/// suppress automatic error-stack printing until destruction.
inline explicit SilenceHDF5(bool enable = true)
: _client_data(nullptr) {
// Capture the current handler first so the destructor can restore it.
detail::nothrow::h5e_get_auto2(H5E_DEFAULT, &_func, &_client_data);
if (enable) {
detail::nothrow::h5e_set_auto2(H5E_DEFAULT, nullptr, nullptr);
}
}
/// \brief Restore the handler captured in the constructor.
inline ~SilenceHDF5() {
detail::nothrow::h5e_set_auto2(H5E_DEFAULT, _func, _client_data);
}
private:
H5E_auto2_t _func;   // saved error handler
void* _client_data;  // saved handler user data
};
#define HIGHFIVE_LOG_LEVEL_DEBUG 10
#define HIGHFIVE_LOG_LEVEL_INFO 20
#define HIGHFIVE_LOG_LEVEL_WARN 30
#define HIGHFIVE_LOG_LEVEL_ERROR 40
#ifndef HIGHFIVE_LOG_LEVEL
#define HIGHFIVE_LOG_LEVEL HIGHFIVE_LOG_LEVEL_WARN
#endif
/// Severity of a log record; the numeric values mirror the
/// HIGHFIVE_LOG_LEVEL_* macros so severities can also be compared at
/// preprocessing time.
enum class LogSeverity {
    Debug = HIGHFIVE_LOG_LEVEL_DEBUG,
    Info = HIGHFIVE_LOG_LEVEL_INFO,
    Warn = HIGHFIVE_LOG_LEVEL_WARN,
    Error = HIGHFIVE_LOG_LEVEL_ERROR
};
/// Upper-case label for a severity; any unknown value maps to "??".
inline std::string to_string(LogSeverity severity) {
    if (severity == LogSeverity::Debug) {
        return "DEBUG";
    }
    if (severity == LogSeverity::Info) {
        return "INFO";
    }
    if (severity == LogSeverity::Warn) {
        return "WARN";
    }
    if (severity == LogSeverity::Error) {
        return "ERROR";
    }
    return "??";
}
/** \brief A logger with supporting basic functionality.
*
* This logger delegates the logging task to a callback. This level of
* indirection enables using the default Python logger from C++; or
* integrating HighFive into some custom logging solution.
*
* Using this class directly to log is not intended. Rather you should use
* - `HIGHFIVE_LOG_DEBUG{,_IF}`
* - `HIGHFIVE_LOG_INFO{,_IF}`
* - `HIGHFIVE_LOG_WARNING{,_IF}`
* - `HIGHFIVE_LOG_ERROR{,_IF}`
*
 * This is intended to be used as a singleton, via `get_global_logger()`.
*/
class Logger {
public:
// Signature of the logging sink: (severity, message, source file, line).
using callback_type =
std::function<void(LogSeverity, const std::string&, const std::string&, int)>;
public:
// Non-copyable, non-movable; intended to exist once (see get_global_logger()).
Logger() = delete;
Logger(const Logger&) = delete;
Logger(Logger&&) = delete;
/// \brief Construct with the callback that will receive every log record.
explicit Logger(callback_type cb)
: _cb(std::move(cb)) {}
Logger& operator=(const Logger&) = delete;
Logger& operator=(Logger&&) = delete;
/// \brief Forward one log record to the registered callback.
inline void log(LogSeverity severity,
const std::string& message,
const std::string& file,
int line) {
_cb(severity, message, file, line);
}
/// \brief Replace the callback (see register_logging_callback()).
inline void set_logging_callback(callback_type cb) {
_cb = std::move(cb);
}
private:
callback_type _cb;
};
// Default sink: writes "file: line [SEVERITY] message" to std::clog.
inline void default_logging_callback(LogSeverity severity,
const std::string& message,
const std::string& file,
int line) {
std::clog << file << ": " << line << " [" << to_string(severity) << "] " << message
<< std::endl;
}
/// \brief Obtain a reference to the logger used by HighFive.
///
/// This uses a Meyers singleton, to ensure that the global logger is
/// initialized with a safe default logger, before it is used.
///
/// Note: You probably don't need to call this function explicitly.
///
inline Logger& get_global_logger() {
// Meyers singleton: the first call constructs the logger with the default
// callback; all subsequent calls return the same instance.
static Logger logger(&default_logging_callback);
return logger;
}
/// \brief Sets the callback that's used by the logger.
inline void register_logging_callback(Logger::callback_type cb) {
// NOTE(review): no synchronization — registering concurrently with logging
// from another thread would race on Logger::_cb; confirm setup is
// single-threaded.
auto& logger = get_global_logger();
logger.set_logging_callback(std::move(cb));
}
namespace detail {
/// \brief Log a `message` with severity `severity`.
// Thin forwarding shim; the HIGHFIVE_LOG_* macros expand to calls of this.
inline void log(LogSeverity severity,
const std::string& message,
const std::string& file,
int line) {
auto& logger = get_global_logger();
logger.log(severity, message, file, line);
}
} // namespace detail
// Compile-time log-level gating: each HIGHFIVE_LOG_<LEVEL> macro expands to a
// call of ::HighFive::detail::log() when HIGHFIVE_LOG_LEVEL permits it, and to
// a lone ';' otherwise, so call sites remain syntactically valid either way.
#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_DEBUG
#define HIGHFIVE_LOG_DEBUG(message) \
::HighFive::detail::log(::HighFive::LogSeverity::Debug, (message), __FILE__, __LINE__);
// Useful, for the common pattern: if ...; then log something.
#define HIGHFIVE_LOG_DEBUG_IF(cond, message) \
if ((cond)) { \
HIGHFIVE_LOG_DEBUG((message)); \
}
#else
#define HIGHFIVE_LOG_DEBUG(message) ;
#define HIGHFIVE_LOG_DEBUG_IF(cond, message) ;
#endif
#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_INFO
#define HIGHFIVE_LOG_INFO(message) \
::HighFive::detail::log(::HighFive::LogSeverity::Info, (message), __FILE__, __LINE__);
// Useful, for the common pattern: if ...; then log something.
#define HIGHFIVE_LOG_INFO_IF(cond, message) \
if ((cond)) { \
HIGHFIVE_LOG_INFO((message)); \
}
#else
#define HIGHFIVE_LOG_INFO(message) ;
#define HIGHFIVE_LOG_INFO_IF(cond, message) ;
#endif
#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_WARN
#define HIGHFIVE_LOG_WARN(message) \
::HighFive::detail::log(::HighFive::LogSeverity::Warn, (message), __FILE__, __LINE__);
// Useful, for the common pattern: if ...; then log something.
#define HIGHFIVE_LOG_WARN_IF(cond, message) \
if ((cond)) { \
HIGHFIVE_LOG_WARN((message)); \
}
#else
#define HIGHFIVE_LOG_WARN(message) ;
#define HIGHFIVE_LOG_WARN_IF(cond, message) ;
#endif
#if HIGHFIVE_LOG_LEVEL <= HIGHFIVE_LOG_LEVEL_ERROR
#define HIGHFIVE_LOG_ERROR(message) \
::HighFive::detail::log(::HighFive::LogSeverity::Error, (message), __FILE__, __LINE__);
// Useful, for the common pattern: if ...; then log something.
#define HIGHFIVE_LOG_ERROR_IF(cond, message) \
if ((cond)) { \
HIGHFIVE_LOG_ERROR((message)); \
}
#else
#define HIGHFIVE_LOG_ERROR(message) ;
#define HIGHFIVE_LOG_ERROR_IF(cond, message) ;
#endif
} // namespace HighFive

Some files were not shown because too many files have changed in this diff Show More