diff --git a/src/POET_Training.ipynb b/src/POET_Training.ipynb
index 47d6b4b..0598b22 100644
--- a/src/POET_Training.ipynb
+++ b/src/POET_Training.ipynb
@@ -54,17 +54,7 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "2025-02-28 10:22:00.281793: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
- "2025-02-28 10:22:00.302002: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
- "To enable the following instructions: SSE4.1 SSE4.2 AVX AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"from preprocessing import *\n",
"import numpy as np\n",
@@ -110,6 +100,7 @@
"source": [
"# load data and differentiate between design and results (before and after a simulation step)\n",
"data_file = h5py.File(\"../datasets/barite_50_4_corner.h5\")\n",
+ "# data_file = h5py.File(\"../datasets/Barite_4c_mdl.h5\")\n",
"\n",
"design = data_file[\"design\"]\n",
"results = data_file[\"result\"]\n",
@@ -124,6 +115,220 @@
"data_file.close()"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " H | \n",
+ " O | \n",
+ " Charge | \n",
+ " Ba | \n",
+ " Cl | \n",
+ " S | \n",
+ " Sr | \n",
+ " Barite | \n",
+ " Celestite | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | 0 | \n",
+ " 111.012434 | \n",
+ " 55.508192 | \n",
+ " -7.779554e-09 | \n",
+ " 2.041069e-02 | \n",
+ " 4.082138e-02 | \n",
+ " 4.938300e-04 | \n",
+ " 0.000494 | \n",
+ " 0.001000 | \n",
+ " 1.0 | \n",
+ "
\n",
+ " \n",
+ " | 1 | \n",
+ " 111.012434 | \n",
+ " 55.508427 | \n",
+ " -4.736083e-09 | \n",
+ " 1.094567e-02 | \n",
+ " 2.189133e-02 | \n",
+ " 5.525578e-04 | \n",
+ " 0.000553 | \n",
+ " 0.001000 | \n",
+ " 1.0 | \n",
+ "
\n",
+ " \n",
+ " | 2 | \n",
+ " 111.012434 | \n",
+ " 55.508691 | \n",
+ " -1.311169e-09 | \n",
+ " 2.943745e-04 | \n",
+ " 5.887491e-04 | \n",
+ " 6.186462e-04 | \n",
+ " 0.000619 | \n",
+ " 0.001000 | \n",
+ " 1.0 | \n",
+ "
\n",
+ " \n",
+ " | 3 | \n",
+ " 111.012434 | \n",
+ " 55.508698 | \n",
+ " -1.220023e-09 | \n",
+ " 1.091776e-05 | \n",
+ " 2.183551e-05 | \n",
+ " 6.204050e-04 | \n",
+ " 0.000620 | \n",
+ " 0.001000 | \n",
+ " 1.0 | \n",
+ "
\n",
+ " \n",
+ " | 4 | \n",
+ " 111.012434 | \n",
+ " 55.508699 | \n",
+ " -1.216643e-09 | \n",
+ " 4.049176e-07 | \n",
+ " 8.098352e-07 | \n",
+ " 6.204702e-04 | \n",
+ " 0.000620 | \n",
+ " 0.001000 | \n",
+ " 1.0 | \n",
+ "
\n",
+ " \n",
+ " | ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " | 629995 | \n",
+ " 111.012434 | \n",
+ " 55.506217 | \n",
+ " 1.725154e-08 | \n",
+ " 3.553636e-02 | \n",
+ " 1.453447e-01 | \n",
+ " 1.536968e-07 | \n",
+ " 0.037136 | \n",
+ " 1.009280 | \n",
+ " 0.0 | \n",
+ "
\n",
+ " \n",
+ " | 629996 | \n",
+ " 111.012434 | \n",
+ " 55.506217 | \n",
+ " 6.831420e-09 | \n",
+ " 4.912662e-02 | \n",
+ " 1.569033e-01 | \n",
+ " 1.268188e-07 | \n",
+ " 0.029325 | \n",
+ " 1.003515 | \n",
+ " 0.0 | \n",
+ "
\n",
+ " \n",
+ " | 629997 | \n",
+ " 111.012434 | \n",
+ " 55.506217 | \n",
+ " -5.813822e-09 | \n",
+ " 6.575386e-02 | \n",
+ " 1.710031e-01 | \n",
+ " 1.085205e-07 | \n",
+ " 0.019748 | \n",
+ " 1.002006 | \n",
+ " 0.0 | \n",
+ "
\n",
+ " \n",
+ " | 629998 | \n",
+ " 111.012434 | \n",
+ " 55.506217 | \n",
+ " -2.301308e-08 | \n",
+ " 8.714450e-02 | \n",
+ " 1.891176e-01 | \n",
+ " 8.563122e-08 | \n",
+ " 0.007414 | \n",
+ " 1.001537 | \n",
+ " 0.0 | \n",
+ "
\n",
+ " \n",
+ " | 629999 | \n",
+ " 111.012434 | \n",
+ " 55.506217 | \n",
+ " -3.077711e-08 | \n",
+ " 9.679703e-02 | \n",
+ " 1.972886e-01 | \n",
+ " 7.278077e-08 | \n",
+ " 0.001847 | \n",
+ " 1.001292 | \n",
+ " 0.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
630000 rows × 9 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " H O Charge Ba Cl \\\n",
+ "0 111.012434 55.508192 -7.779554e-09 2.041069e-02 4.082138e-02 \n",
+ "1 111.012434 55.508427 -4.736083e-09 1.094567e-02 2.189133e-02 \n",
+ "2 111.012434 55.508691 -1.311169e-09 2.943745e-04 5.887491e-04 \n",
+ "3 111.012434 55.508698 -1.220023e-09 1.091776e-05 2.183551e-05 \n",
+ "4 111.012434 55.508699 -1.216643e-09 4.049176e-07 8.098352e-07 \n",
+ "... ... ... ... ... ... \n",
+ "629995 111.012434 55.506217 1.725154e-08 3.553636e-02 1.453447e-01 \n",
+ "629996 111.012434 55.506217 6.831420e-09 4.912662e-02 1.569033e-01 \n",
+ "629997 111.012434 55.506217 -5.813822e-09 6.575386e-02 1.710031e-01 \n",
+ "629998 111.012434 55.506217 -2.301308e-08 8.714450e-02 1.891176e-01 \n",
+ "629999 111.012434 55.506217 -3.077711e-08 9.679703e-02 1.972886e-01 \n",
+ "\n",
+ " S Sr Barite Celestite \n",
+ "0 4.938300e-04 0.000494 0.001000 1.0 \n",
+ "1 5.525578e-04 0.000553 0.001000 1.0 \n",
+ "2 6.186462e-04 0.000619 0.001000 1.0 \n",
+ "3 6.204050e-04 0.000620 0.001000 1.0 \n",
+ "4 6.204702e-04 0.000620 0.001000 1.0 \n",
+ "... ... ... ... ... \n",
+ "629995 1.536968e-07 0.037136 1.009280 0.0 \n",
+ "629996 1.268188e-07 0.029325 1.003515 0.0 \n",
+ "629997 1.085205e-07 0.019748 1.002006 0.0 \n",
+ "629998 8.563122e-08 0.007414 1.001537 0.0 \n",
+ "629999 7.278077e-08 0.001847 1.001292 0.0 \n",
+ "\n",
+ "[630000 rows x 9 columns]"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_design"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -149,23 +354,28 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
- "species_columns = [\"H\", \"O\", \"Ba\", \"Cl\", \"S\", \"Sr\", \"Barite\", \"Celestite\"]"
+ "species_columns = [\"H\", \"O\", \"Ba\", \"Cl\", \"S\", \"Sr\", \"Barite\", \"Celestite\"]\n",
+ "\n",
+ "species_columns_input = [\"H\", \"O\", \"Ba\", \"Cl\", \"S\", \"Sr\", \"Barite\", \"Celestite\"]\n",
+ "species_columns_output = [\"H\", \"O\", \"Ba\", \"Cl\", \"S\", \"Sr\", \"Barite\", \"Celestite\"]\n",
+ "\n",
+ "# species_columns_output = [\"O\", \"Ba\", \"S\", \"Sr\", \"Barite\", \"Celestite\"]"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "/home/signer/bin/miniconda3/envs/training/lib/python3.11/site-packages/sklearn/base.py:1473: ConvergenceWarning: Number of distinct clusters (1) found smaller than n_clusters (2). Possibly due to duplicate points in X.\n",
+ "/Users/hannessigner/miniconda3/envs/ai/lib/python3.11/site-packages/sklearn/base.py:1473: ConvergenceWarning: Number of distinct clusters (1) found smaller than n_clusters (2). Possibly due to duplicate points in X.\n",
" return fit_method(estimator, *args, **kwargs)\n"
]
},
@@ -181,19 +391,18 @@
],
"source": [
"preprocess = preprocessing() #np.log1p, np.expm1\n",
- "X, y = preprocess.cluster(df_design[species_columns], df_results[species_columns])\n",
+ "X, y = preprocess.cluster(df_design[species_columns_input], df_results[species_columns_output])\n",
"\n",
"# optional: perform log transformation\n",
"# X, y = preprocess.funcTranform(X, y)\n",
"\n",
"X_train, X_test, y_train, y_test = preprocess.split(X, y, ratio=0.2)\n",
- "X_train_origin = X_train.copy()\n",
"X_train, y_train = preprocess.balancer(X_train, y_train, strategy=\"off\")\n",
"X_train, y_train = preprocess.class_selection(X_train, y_train, class_label=0)\n",
- "# preprocess.scale_fit(X_train, y_train, scaling=\"global\", type=\"standard\")\n",
- "# X_train, X_test, y_train, y_test = preprocess.scale_transform(\n",
- "# X_train, X_test, y_train, y_test\n",
- "# )\n",
+ "preprocess.scale_fit(X_train, y_train, scaling=\"global\", type=\"standard\")\n",
+ "X_train, X_test, y_train, y_test = preprocess.scale_transform(\n",
+ " X_train, X_test, y_train, y_test\n",
+ ")\n",
"X_train, X_val, y_train, y_val = preprocess.split(X_train, y_train, ratio=0.1)"
]
},
@@ -206,7 +415,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -257,7 +466,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -275,7 +484,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -303,12 +512,12 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"# select model architecture\n",
- "model = model_definition(\"large_batch_normalization\")\n",
+ "model = model_definition(\"large\")\n",
"\n",
"# define learning rate adaptation\n",
"lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n",
@@ -321,14 +530,17 @@
"h3 = 0.5099528144902471\n",
"\n",
"\n",
- "scaler_type = \"none\"\n",
+ "scaler_type = \"standard\"\n",
"loss_variant = \"huber_mass_balance\"\n",
"delta = 1.7642791340966357\n",
"\n",
"\n",
"optimizer = keras.optimizers.Adam(learning_rate=lr_schedule)\n",
+ "optimizer_sgd = keras.optimizers.SGD(learning_rate=lr_schedule)\n",
+ "optimizer_rmsprop = keras.optimizers.RMSprop(learning_rate=lr_schedule)\n",
+ "\n",
"model.compile(\n",
- " optimizer=optimizer,\n",
+ " optimizer=optimizer_rmsprop,\n",
" loss=custom_loss(preprocess, column_dict, h1, h2, h3, scaler_type, loss_variant, 1),\n",
" metrics=[\n",
" huber_metric(delta),\n",
@@ -339,22 +551,230 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Epoch 1/50\n",
- "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 13ms/step - huber: 2.7347e-04 - loss: 0.0051 - mass_balance: 0.0098 - val_huber: 2.8895e-05 - val_loss: 0.0032 - val_mass_balance: 0.0062\n",
- "Epoch 2/50\n",
- "\u001b[1m647/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━\u001b[0m\u001b[37m━━━━━━\u001b[0m \u001b[1m3s\u001b[0m 13ms/step - huber: 3.2162e-04 - loss: 0.0052 - mass_balance: 0.0098"
+ "Epoch 1/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0705 - loss: 0.0376 - mass_balance: 0.0547 - val_huber: 0.0055 - val_loss: 0.0084 - val_mass_balance: 0.0144\n",
+ "Epoch 2/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0048 - loss: 0.0074 - mass_balance: 0.0128 - val_huber: 0.0027 - val_loss: 0.0069 - val_mass_balance: 0.0124\n",
+ "Epoch 3/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0027 - loss: 0.0055 - mass_balance: 0.0098 - val_huber: 0.0018 - val_loss: 0.0040 - val_mass_balance: 0.0072\n",
+ "Epoch 4/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 0.0022 - loss: 0.0047 - mass_balance: 0.0084 - val_huber: 0.0015 - val_loss: 0.0051 - val_mass_balance: 0.0094\n",
+ "Epoch 5/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 0.0019 - loss: 0.0044 - mass_balance: 0.0078 - val_huber: 7.9947e-04 - val_loss: 0.0039 - val_mass_balance: 0.0073\n",
+ "Epoch 6/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0014 - loss: 0.0037 - mass_balance: 0.0067 - val_huber: 7.5188e-04 - val_loss: 0.0029 - val_mass_balance: 0.0053\n",
+ "Epoch 7/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0013 - loss: 0.0036 - mass_balance: 0.0065 - val_huber: 8.1429e-04 - val_loss: 0.0041 - val_mass_balance: 0.0076\n",
+ "Epoch 8/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 0.0010 - loss: 0.0031 - mass_balance: 0.0056 - val_huber: 9.1488e-04 - val_loss: 0.0026 - val_mass_balance: 0.0046\n",
+ "Epoch 9/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 0.0010 - loss: 0.0030 - mass_balance: 0.0054 - val_huber: 4.9456e-04 - val_loss: 0.0015 - val_mass_balance: 0.0028\n",
+ "Epoch 10/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 8.3918e-04 - loss: 0.0027 - mass_balance: 0.0049 - val_huber: 8.6339e-04 - val_loss: 0.0017 - val_mass_balance: 0.0029\n",
+ "Epoch 11/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 7.8853e-04 - loss: 0.0026 - mass_balance: 0.0047 - val_huber: 3.6463e-04 - val_loss: 0.0021 - val_mass_balance: 0.0039\n",
+ "Epoch 12/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 7.0225e-04 - loss: 0.0025 - mass_balance: 0.0045 - val_huber: 7.3322e-04 - val_loss: 0.0031 - val_mass_balance: 0.0057\n",
+ "Epoch 13/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 6.4799e-04 - loss: 0.0023 - mass_balance: 0.0042 - val_huber: 3.9136e-04 - val_loss: 0.0022 - val_mass_balance: 0.0042\n",
+ "Epoch 14/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 6.1216e-04 - loss: 0.0022 - mass_balance: 0.0041 - val_huber: 5.9635e-04 - val_loss: 0.0027 - val_mass_balance: 0.0050\n",
+ "Epoch 15/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 5.1730e-04 - loss: 0.0020 - mass_balance: 0.0037 - val_huber: 2.0320e-04 - val_loss: 0.0016 - val_mass_balance: 0.0031\n",
+ "Epoch 16/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 5.0061e-04 - loss: 0.0020 - mass_balance: 0.0036 - val_huber: 3.1570e-04 - val_loss: 0.0022 - val_mass_balance: 0.0042\n",
+ "Epoch 17/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 3.9930e-04 - loss: 0.0018 - mass_balance: 0.0033 - val_huber: 2.1322e-04 - val_loss: 0.0010 - val_mass_balance: 0.0019\n",
+ "Epoch 18/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 3.9617e-04 - loss: 0.0017 - mass_balance: 0.0032 - val_huber: 7.0048e-04 - val_loss: 0.0023 - val_mass_balance: 0.0042\n",
+ "Epoch 19/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.4277e-04 - loss: 0.0016 - mass_balance: 0.0030 - val_huber: 1.5957e-04 - val_loss: 0.0015 - val_mass_balance: 0.0028\n",
+ "Epoch 20/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.1705e-04 - loss: 0.0015 - mass_balance: 0.0029 - val_huber: 2.3869e-04 - val_loss: 0.0019 - val_mass_balance: 0.0036\n",
+ "Epoch 21/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.9762e-04 - loss: 0.0015 - mass_balance: 0.0027 - val_huber: 2.6323e-04 - val_loss: 0.0017 - val_mass_balance: 0.0032\n",
+ "Epoch 22/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 2.6150e-04 - loss: 0.0014 - mass_balance: 0.0026 - val_huber: 1.2177e-04 - val_loss: 6.8063e-04 - val_mass_balance: 0.0013\n",
+ "Epoch 23/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 2.7570e-04 - loss: 0.0014 - mass_balance: 0.0025 - val_huber: 7.4193e-04 - val_loss: 0.0017 - val_mass_balance: 0.0031\n",
+ "Epoch 24/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 2.2458e-04 - loss: 0.0012 - mass_balance: 0.0023 - val_huber: 4.6217e-04 - val_loss: 0.0019 - val_mass_balance: 0.0035\n",
+ "Epoch 25/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 2.2827e-04 - loss: 0.0012 - mass_balance: 0.0023 - val_huber: 1.4619e-04 - val_loss: 0.0013 - val_mass_balance: 0.0024\n",
+ "Epoch 26/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 1.8971e-04 - loss: 0.0011 - mass_balance: 0.0021 - val_huber: 9.1612e-05 - val_loss: 7.4290e-04 - val_mass_balance: 0.0014\n",
+ "Epoch 27/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 1.8916e-04 - loss: 0.0011 - mass_balance: 0.0021 - val_huber: 1.3871e-04 - val_loss: 0.0012 - val_mass_balance: 0.0023\n",
+ "Epoch 28/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.6124e-04 - loss: 0.0010 - mass_balance: 0.0019 - val_huber: 1.2757e-04 - val_loss: 0.0013 - val_mass_balance: 0.0025\n",
+ "Epoch 29/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.5497e-04 - loss: 9.9073e-04 - mass_balance: 0.0019 - val_huber: 3.6474e-04 - val_loss: 9.6044e-04 - val_mass_balance: 0.0017\n",
+ "Epoch 30/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.4456e-04 - loss: 9.6936e-04 - mass_balance: 0.0018 - val_huber: 1.6395e-04 - val_loss: 8.2863e-04 - val_mass_balance: 0.0015\n",
+ "Epoch 31/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.2945e-04 - loss: 8.7237e-04 - mass_balance: 0.0016 - val_huber: 1.0211e-04 - val_loss: 8.3225e-04 - val_mass_balance: 0.0016\n",
+ "Epoch 32/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.3383e-04 - loss: 8.8817e-04 - mass_balance: 0.0017 - val_huber: 1.3947e-04 - val_loss: 8.6029e-04 - val_mass_balance: 0.0016\n",
+ "Epoch 33/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.1013e-04 - loss: 7.9485e-04 - mass_balance: 0.0015 - val_huber: 6.5566e-05 - val_loss: 8.3252e-04 - val_mass_balance: 0.0016\n",
+ "Epoch 34/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 1.1124e-04 - loss: 7.9307e-04 - mass_balance: 0.0015 - val_huber: 1.2914e-04 - val_loss: 8.4401e-04 - val_mass_balance: 0.0016\n",
+ "Epoch 35/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 1.0443e-04 - loss: 7.1706e-04 - mass_balance: 0.0014 - val_huber: 6.9409e-05 - val_loss: 5.0371e-04 - val_mass_balance: 9.4951e-04\n",
+ "Epoch 36/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 9.8640e-05 - loss: 7.2031e-04 - mass_balance: 0.0014 - val_huber: 5.5465e-05 - val_loss: 4.5251e-04 - val_mass_balance: 8.5743e-04\n",
+ "Epoch 37/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 9.0942e-05 - loss: 6.7768e-04 - mass_balance: 0.0013 - val_huber: 5.0912e-05 - val_loss: 5.5779e-04 - val_mass_balance: 0.0011\n",
+ "Epoch 38/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 8.1881e-05 - loss: 6.4645e-04 - mass_balance: 0.0012 - val_huber: 6.0961e-05 - val_loss: 6.9782e-04 - val_mass_balance: 0.0013\n",
+ "Epoch 39/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 7.9824e-05 - loss: 6.4280e-04 - mass_balance: 0.0012 - val_huber: 9.4689e-05 - val_loss: 5.0573e-04 - val_mass_balance: 9.4438e-04\n",
+ "Epoch 40/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 7.9241e-05 - loss: 5.9208e-04 - mass_balance: 0.0011 - val_huber: 8.6559e-05 - val_loss: 3.5590e-04 - val_mass_balance: 6.6112e-04\n",
+ "Epoch 41/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 7.7285e-05 - loss: 5.9197e-04 - mass_balance: 0.0011 - val_huber: 4.1481e-05 - val_loss: 3.4559e-04 - val_mass_balance: 6.5390e-04\n",
+ "Epoch 42/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 6.4093e-05 - loss: 5.2739e-04 - mass_balance: 9.9723e-04 - val_huber: 4.7314e-05 - val_loss: 4.4883e-04 - val_mass_balance: 8.5088e-04\n",
+ "Epoch 43/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 6.3626e-05 - loss: 5.2942e-04 - mass_balance: 0.0010 - val_huber: 3.8719e-05 - val_loss: 3.5273e-04 - val_mass_balance: 6.6833e-04\n",
+ "Epoch 44/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 5.9336e-05 - loss: 4.9010e-04 - mass_balance: 9.2701e-04 - val_huber: 5.0533e-05 - val_loss: 5.5681e-04 - val_mass_balance: 0.0011\n",
+ "Epoch 45/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 6.5335e-05 - loss: 4.8230e-04 - mass_balance: 9.1032e-04 - val_huber: 6.0312e-05 - val_loss: 4.3221e-04 - val_mass_balance: 8.1501e-04\n",
+ "Epoch 46/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 5.4553e-05 - loss: 4.6042e-04 - mass_balance: 8.7118e-04 - val_huber: 4.0107e-05 - val_loss: 4.4224e-04 - val_mass_balance: 8.4141e-04\n",
+ "Epoch 47/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 5.3592e-05 - loss: 4.3683e-04 - mass_balance: 8.2614e-04 - val_huber: 3.8911e-05 - val_loss: 3.8720e-04 - val_mass_balance: 7.3242e-04\n",
+ "Epoch 48/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 4.9031e-05 - loss: 4.2875e-04 - mass_balance: 8.1180e-04 - val_huber: 3.3141e-05 - val_loss: 2.6060e-04 - val_mass_balance: 4.9409e-04\n",
+ "Epoch 49/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 5.1804e-05 - loss: 3.9923e-04 - mass_balance: 7.5477e-04 - val_huber: 4.3680e-05 - val_loss: 6.1061e-04 - val_mass_balance: 0.0012\n",
+ "Epoch 50/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 4.6059e-05 - loss: 3.9467e-04 - mass_balance: 7.4728e-04 - val_huber: 3.9279e-05 - val_loss: 4.7099e-04 - val_mass_balance: 8.9619e-04\n",
+ "Epoch 51/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 5.0427e-05 - loss: 3.6663e-04 - mass_balance: 6.9204e-04 - val_huber: 3.7131e-05 - val_loss: 4.1189e-04 - val_mass_balance: 7.8176e-04\n",
+ "Epoch 52/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 13ms/step - huber: 3.9474e-05 - loss: 3.6436e-04 - mass_balance: 6.9074e-04 - val_huber: 3.3401e-05 - val_loss: 3.6578e-04 - val_mass_balance: 6.9520e-04\n",
+ "Epoch 53/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 3.8509e-05 - loss: 3.3790e-04 - mass_balance: 6.4023e-04 - val_huber: 3.0546e-05 - val_loss: 2.9259e-04 - val_mass_balance: 5.5522e-04\n",
+ "Epoch 54/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 4.4154e-05 - loss: 3.3650e-04 - mass_balance: 6.3616e-04 - val_huber: 3.2173e-05 - val_loss: 3.8614e-04 - val_mass_balance: 7.3560e-04\n",
+ "Epoch 55/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 4.0561e-05 - loss: 3.2498e-04 - mass_balance: 6.1500e-04 - val_huber: 2.7365e-05 - val_loss: 3.4403e-04 - val_mass_balance: 6.5494e-04\n",
+ "Epoch 56/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.6654e-05 - loss: 3.0816e-04 - mass_balance: 5.8368e-04 - val_huber: 3.7312e-05 - val_loss: 2.6574e-04 - val_mass_balance: 5.0061e-04\n",
+ "Epoch 57/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 3.5056e-05 - loss: 3.0545e-04 - mass_balance: 5.7885e-04 - val_huber: 4.6792e-05 - val_loss: 2.2920e-04 - val_mass_balance: 4.2802e-04\n",
+ "Epoch 58/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.6068e-05 - loss: 2.8670e-04 - mass_balance: 5.4251e-04 - val_huber: 3.1751e-05 - val_loss: 2.8733e-04 - val_mass_balance: 5.4481e-04\n",
+ "Epoch 59/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 3.6832e-05 - loss: 2.8790e-04 - mass_balance: 5.4469e-04 - val_huber: 2.4118e-05 - val_loss: 2.3764e-04 - val_mass_balance: 4.5079e-04\n",
+ "Epoch 60/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.2742e-05 - loss: 2.6821e-04 - mass_balance: 5.0775e-04 - val_huber: 3.1259e-05 - val_loss: 3.1066e-04 - val_mass_balance: 5.8887e-04\n",
+ "Epoch 61/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 3.6154e-05 - loss: 2.6625e-04 - mass_balance: 5.0323e-04 - val_huber: 2.3741e-05 - val_loss: 2.9083e-04 - val_mass_balance: 5.5358e-04\n",
+ "Epoch 62/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 12ms/step - huber: 3.1682e-05 - loss: 2.4844e-04 - mass_balance: 4.7011e-04 - val_huber: 2.6166e-05 - val_loss: 2.8237e-04 - val_mass_balance: 5.3732e-04\n",
+ "Epoch 63/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 3.0130e-05 - loss: 2.4775e-04 - mass_balance: 4.6928e-04 - val_huber: 2.4671e-05 - val_loss: 2.3591e-04 - val_mass_balance: 4.4794e-04\n",
+ "Epoch 64/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7064e-05 - loss: 2.3829e-04 - mass_balance: 4.5196e-04 - val_huber: 2.1523e-05 - val_loss: 1.8855e-04 - val_mass_balance: 3.5818e-04\n",
+ "Epoch 65/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.5418e-05 - loss: 2.2991e-04 - mass_balance: 4.3373e-04 - val_huber: 2.2211e-05 - val_loss: 2.4162e-04 - val_mass_balance: 4.5929e-04\n",
+ "Epoch 66/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 2.7332e-05 - loss: 2.2668e-04 - mass_balance: 4.2955e-04 - val_huber: 2.3143e-05 - val_loss: 2.1197e-04 - val_mass_balance: 4.0182e-04\n",
+ "Epoch 67/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 3.1761e-05 - loss: 2.1444e-04 - mass_balance: 4.0479e-04 - val_huber: 2.1088e-05 - val_loss: 2.0780e-04 - val_mass_balance: 3.9437e-04\n",
+ "Epoch 68/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.9258e-05 - loss: 2.1371e-04 - mass_balance: 4.0413e-04 - val_huber: 2.0805e-05 - val_loss: 2.2247e-04 - val_mass_balance: 4.2178e-04\n",
+ "Epoch 69/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.0333e-05 - loss: 1.9963e-04 - mass_balance: 3.7686e-04 - val_huber: 2.0850e-05 - val_loss: 2.4595e-04 - val_mass_balance: 4.6860e-04\n",
+ "Epoch 70/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 2.4570e-05 - loss: 1.9933e-04 - mass_balance: 3.7760e-04 - val_huber: 1.9540e-05 - val_loss: 2.2234e-04 - val_mass_balance: 4.2295e-04\n",
+ "Epoch 71/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 2.9600e-05 - loss: 1.8997e-04 - mass_balance: 3.5837e-04 - val_huber: 1.9270e-05 - val_loss: 1.8605e-04 - val_mass_balance: 3.5303e-04\n",
+ "Epoch 72/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.9223e-05 - loss: 1.8909e-04 - mass_balance: 3.5671e-04 - val_huber: 1.9539e-05 - val_loss: 2.0576e-04 - val_mass_balance: 3.9015e-04\n",
+ "Epoch 73/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.4424e-05 - loss: 1.8592e-04 - mass_balance: 3.5180e-04 - val_huber: 2.0195e-05 - val_loss: 1.4592e-04 - val_mass_balance: 2.7530e-04\n",
+ "Epoch 74/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.8129e-05 - loss: 1.7866e-04 - mass_balance: 3.3693e-04 - val_huber: 1.8796e-05 - val_loss: 1.6450e-04 - val_mass_balance: 3.1172e-04\n",
+ "Epoch 75/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 2.8131e-05 - loss: 1.7490e-04 - mass_balance: 3.2968e-04 - val_huber: 1.8508e-05 - val_loss: 1.6301e-04 - val_mass_balance: 3.0866e-04\n",
+ "Epoch 76/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 2.4057e-05 - loss: 1.6790e-04 - mass_balance: 3.1727e-04 - val_huber: 1.9083e-05 - val_loss: 1.7234e-04 - val_mass_balance: 3.2680e-04\n",
+ "Epoch 77/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 2.9163e-05 - loss: 1.6804e-04 - mass_balance: 3.1613e-04 - val_huber: 1.9127e-05 - val_loss: 1.4898e-04 - val_mass_balance: 2.8119e-04\n",
+ "Epoch 78/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.2734e-05 - loss: 1.5912e-04 - mass_balance: 2.9818e-04 - val_huber: 1.9169e-05 - val_loss: 1.5775e-04 - val_mass_balance: 2.9927e-04\n",
+ "Epoch 79/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.5924e-05 - loss: 1.5885e-04 - mass_balance: 2.9926e-04 - val_huber: 1.9022e-05 - val_loss: 1.7911e-04 - val_mass_balance: 3.4080e-04\n",
+ "Epoch 80/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.4396e-05 - loss: 1.5074e-04 - mass_balance: 2.8407e-04 - val_huber: 1.8558e-05 - val_loss: 1.3966e-04 - val_mass_balance: 2.6391e-04\n",
+ "Epoch 81/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7325e-05 - loss: 1.5136e-04 - mass_balance: 2.8449e-04 - val_huber: 2.0540e-05 - val_loss: 1.3134e-04 - val_mass_balance: 2.4671e-04\n",
+ "Epoch 82/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 2.6151e-05 - loss: 1.4802e-04 - mass_balance: 2.7826e-04 - val_huber: 1.9531e-05 - val_loss: 1.2805e-04 - val_mass_balance: 2.4077e-04\n",
+ "Epoch 83/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7614e-05 - loss: 1.4386e-04 - mass_balance: 2.6984e-04 - val_huber: 1.9598e-05 - val_loss: 1.8651e-04 - val_mass_balance: 3.5418e-04\n",
+ "Epoch 84/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7264e-05 - loss: 1.4393e-04 - mass_balance: 2.7011e-04 - val_huber: 1.8636e-05 - val_loss: 1.6764e-04 - val_mass_balance: 3.1838e-04\n",
+ "Epoch 85/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 10ms/step - huber: 2.4043e-05 - loss: 1.3625e-04 - mass_balance: 2.5608e-04 - val_huber: 1.8908e-05 - val_loss: 1.1168e-04 - val_mass_balance: 2.0970e-04\n",
+ "Epoch 86/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.5515e-05 - loss: 1.3550e-04 - mass_balance: 2.5437e-04 - val_huber: 1.8760e-05 - val_loss: 1.2953e-04 - val_mass_balance: 2.4397e-04\n",
+ "Epoch 87/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 12ms/step - huber: 2.7067e-05 - loss: 1.3032e-04 - mass_balance: 2.4385e-04 - val_huber: 1.8677e-05 - val_loss: 1.3273e-04 - val_mass_balance: 2.5018e-04\n",
+ "Epoch 88/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 3.1999e-05 - loss: 1.3079e-04 - mass_balance: 2.4365e-04 - val_huber: 1.8354e-05 - val_loss: 1.2330e-04 - val_mass_balance: 2.3299e-04\n",
+ "Epoch 89/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7458e-05 - loss: 1.2483e-04 - mass_balance: 2.3318e-04 - val_huber: 1.8978e-05 - val_loss: 1.3712e-04 - val_mass_balance: 2.5954e-04\n",
+ "Epoch 90/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.5748e-05 - loss: 1.2475e-04 - mass_balance: 2.3357e-04 - val_huber: 1.8841e-05 - val_loss: 1.0728e-04 - val_mass_balance: 2.0082e-04\n",
+ "Epoch 91/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.4423e-05 - loss: 1.2134e-04 - mass_balance: 2.2726e-04 - val_huber: 1.8523e-05 - val_loss: 1.0438e-04 - val_mass_balance: 1.9560e-04\n",
+ "Epoch 92/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m12s\u001b[0m 13ms/step - huber: 2.9029e-05 - loss: 1.1902e-04 - mass_balance: 2.2165e-04 - val_huber: 1.8708e-05 - val_loss: 1.2226e-04 - val_mass_balance: 2.3069e-04\n",
+ "Epoch 93/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 2.3506e-05 - loss: 1.1819e-04 - mass_balance: 2.2132e-04 - val_huber: 1.8732e-05 - val_loss: 1.2178e-04 - val_mass_balance: 2.2955e-04\n",
+ "Epoch 94/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.5712e-05 - loss: 1.1423e-04 - mass_balance: 2.1317e-04 - val_huber: 1.8863e-05 - val_loss: 1.1347e-04 - val_mass_balance: 2.1352e-04\n",
+ "Epoch 95/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.5301e-05 - loss: 1.1412e-04 - mass_balance: 2.1303e-04 - val_huber: 1.8430e-05 - val_loss: 1.0728e-04 - val_mass_balance: 2.0157e-04\n",
+ "Epoch 96/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.4672e-05 - loss: 1.0913e-04 - mass_balance: 2.0350e-04 - val_huber: 1.8425e-05 - val_loss: 1.2479e-04 - val_mass_balance: 2.3540e-04\n",
+ "Epoch 97/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 11ms/step - huber: 2.1816e-05 - loss: 1.0856e-04 - mass_balance: 2.0323e-04 - val_huber: 1.8269e-05 - val_loss: 1.0062e-04 - val_mass_balance: 1.8860e-04\n",
+ "Epoch 98/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.7558e-05 - loss: 1.0698e-04 - mass_balance: 1.9863e-04 - val_huber: 1.8533e-05 - val_loss: 1.0357e-04 - val_mass_balance: 1.9424e-04\n",
+ "Epoch 99/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.3566e-05 - loss: 1.0499e-04 - mass_balance: 1.9578e-04 - val_huber: 1.8690e-05 - val_loss: 1.1137e-04 - val_mass_balance: 2.0950e-04\n",
+ "Epoch 100/100\n",
+ "\u001b[1m886/886\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 11ms/step - huber: 2.6048e-05 - loss: 1.0412e-04 - mass_balance: 1.9363e-04 - val_huber: 1.8614e-05 - val_loss: 9.9243e-05 - val_mass_balance: 1.8583e-04\n",
+ "Training took 986.1324112415314 seconds\n"
]
}
],
"source": [
- "history = model_training(model, epochs=50)"
+ "history = model_training(model, epochs=100)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pickle\n",
+ "with open('rmsprop_history.pkl', 'wb') as file_pi:\n",
+ " pickle.dump(history.history, file_pi)\n"
]
},
{
@@ -373,7 +793,7 @@
},
{
"cell_type": "code",
- "execution_count": 23,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -745,7 +1165,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "training",
+ "display_name": "ai",
"language": "python",
"name": "python3"
},