{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## General Information" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook is used to train a simple neural network model to predict the chemistry in the barite benchmark (50x50 grid). The training data is stored in the repository using **git large file storage** and can be downloaded after the installation of git lfs using the `git lfs pull` command.\n", "\n", "It is then recommended to create a Python environment using miniconda. The necessary dependencies are contained in `environment.yml` and can be installed using `conda env create -f environment.yml`.\n", "\n", "The data set is divided into a design and result part and consists of the iterations of a reference simulation. The design part of the data set contains the chemical concentrations at time $t$ and the result part at time $t+1$, which are to be learned by the model." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup Libraries" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-02-17 10:30:47.780794: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", "2025-02-17 10:30:47.804086: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: SSE4.1 SSE4.2 AVX AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running Keras in version 3.8.0\n" ] } ], "source": [ "import keras\n", "from keras.layers import Dense, Dropout, Input,BatchNormalization\n", "import tensorflow as tf\n", "import h5py\n", "import numpy as np\n", "import pandas as pd\n", "import time\n", "import sklearn.model_selection as sk\n", "import matplotlib.pyplot as plt\n", "from sklearn.cluster import KMeans\n", "from sklearn.pipeline import Pipeline, make_pipeline\n", "from sklearn.preprocessing import StandardScaler, MinMaxScaler\n", "from imblearn.over_sampling import SMOTE\n", "from imblearn.under_sampling import RandomUnderSampler\n", "from imblearn.over_sampling import RandomOverSampler\n", "from collections import Counter\n", "import os\n", "from preprocessing import *\n", "from sklearn import set_config\n", "from importlib import reload\n", "set_config(transform_output = \"pandas\")" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Define parameters" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "dtype = \"float32\"\n", "activation = \"relu\"\n", "\n", "lr = 0.001\n", "batch_size = 512\n", "epochs = 50 # default 400 epochs\n", "\n", "lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n", " initial_learning_rate=lr,\n", " decay_steps=2000,\n", " decay_rate=0.9,\n", " staircase=True\n", ")\n", "\n", "optimizer_simple = keras.optimizers.Adam(learning_rate=lr_schedule)\n", "optimizer_large = keras.optimizers.Adam(learning_rate=lr_schedule)\n", "optimizer_paper = 
keras.optimizers.Adam(learning_rate=lr_schedule)\n", "\n", "\n", "loss = keras.losses.Huber()\n", "\n", "sample_fraction = 0.8" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup the model" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
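The `ExponentialDecay` schedule defined in the parameters cell above uses `staircase=True`, so the learning rate is multiplied by 0.9 once every 2000 optimizer steps instead of decaying continuously. A minimal sketch (added here, not part of the original notebook) of what that means in numbers:

```python
# Minimal sketch: with staircase=True the rate follows
#   lr(step) = 0.001 * 0.9 ** (step // 2000)
import keras

demo_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.001,
    decay_steps=2000,
    decay_rate=0.9,
    staircase=True,
)

for step in (0, 1999, 2000, 4000, 10000):
    print(step, float(demo_schedule(step)))
# 0 and 1999 -> 0.001, 2000 -> 0.0009, 4000 -> 0.00081, 10000 -> ~0.00059
```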
Model: \"sequential\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense (Dense) │ (None, 128) │ 1,152 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_1 (Dense) │ (None, 128) │ 16,512 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_2 (Dense) │ (None, 8) │ 1,032 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m1,152\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_1 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m16,512\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_2 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m8\u001b[0m) │ \u001b[38;5;34m1,032\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 18,696 (73.03 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m18,696\u001b[0m (73.03 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 18,696 (73.03 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m18,696\u001b[0m (73.03 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 0 (0.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# small model\n", "model_simple = keras.Sequential(\n", " [\n", " keras.Input(shape = (8,), dtype = \"float32\"),\n", " keras.layers.Dense(units = 128, activation = \"linear\", dtype = \"float32\"),\n", " # Dropout(0.2),\n", " keras.layers.Dense(units = 128, activation = \"elu\", dtype = \"float32\"),\n", " keras.layers.Dense(units = 8, dtype = \"float32\")\n", " ]\n", ")\n", "\n", "model_simple.compile(optimizer=optimizer_simple, loss = loss)\n", "model_simple.summary()" ] }, { "cell_type": "code", "execution_count": 51, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
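As a quick sanity check on the summary above (added here, not in the original notebook): each `Dense` layer has `(inputs + 1) * units` parameters, which reproduces the counts Keras reports for the small model.

```python
# Sketch: reproduce the parameter counts of the small model by hand.
layer_sizes = [8, 128, 128, 8]          # input width followed by the three Dense layers
params = [(n_in + 1) * n_out            # +1 accounts for the bias of each unit
          for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:])]
print(params)       # [1152, 16512, 1032]
print(sum(params))  # 18696 -> matches "Total params: 18,696"
```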
Model: \"sequential_5\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_5\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense_21 (Dense) │ (None, 512) │ 4,608 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_22 (Dense) │ (None, 1024) │ 525,312 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_23 (Dense) │ (None, 512) │ 524,800 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_24 (Dense) │ (None, 8) │ 4,104 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense_21 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m512\u001b[0m) │ \u001b[38;5;34m4,608\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_22 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1024\u001b[0m) │ \u001b[38;5;34m525,312\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_23 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m512\u001b[0m) │ \u001b[38;5;34m524,800\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_24 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m8\u001b[0m) │ \u001b[38;5;34m4,104\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 1,058,824 (4.04 MB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m1,058,824\u001b[0m (4.04 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 1,058,824 (4.04 MB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m1,058,824\u001b[0m (4.04 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 0 (0.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# large model\n", "model_large = keras.Sequential(\n", " [keras.layers.Input(shape=(8,), dtype=dtype),\n", " keras.layers.Dense(512, activation='relu', dtype=dtype),\n", " keras.layers.Dense(1024, activation='relu', dtype=dtype),\n", " keras.layers.Dense(512, activation='relu', dtype=dtype),\n", " keras.layers.Dense(8, dtype=dtype)\n", " ])\n", "\n", "model_large.compile(optimizer=optimizer_large, loss = loss)\n", "model_large.summary()\n" ] }, { "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
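The models in this section are compiled with the Huber loss defined in the parameters cell. One possible motivation (an interpretation, not stated in the notebook) is that the concentrations span many orders of magnitude, so a loss that grows only linearly for large residuals keeps rare extreme errors from dominating the gradient. A small illustrative comparison with MSE, using made-up numbers:

```python
# Illustration only: Huber behaves like MSE for small residuals but linearly for large ones.
import numpy as np
import keras

y_true = np.array([[0.0], [0.0]], dtype="float32")
y_pred = np.array([[0.1], [5.0]], dtype="float32")  # one small, one large residual

huber = keras.losses.Huber()                 # delta = 1.0 by default
mse = keras.losses.MeanSquaredError()
print(float(huber(y_true, y_pred)))          # ~2.25: the large residual enters linearly
print(float(mse(y_true, y_pred)))            # ~12.5: the large residual enters quadratically
```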
Model: \"sequential_4\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_4\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense_16 (Dense) │ (None, 128) │ 1,152 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_17 (Dense) │ (None, 256) │ 33,024 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_18 (Dense) │ (None, 512) │ 131,584 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_19 (Dense) │ (None, 256) │ 131,328 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_20 (Dense) │ (None, 8) │ 2,056 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ dense_16 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m1,152\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_17 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m33,024\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_18 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m512\u001b[0m) │ \u001b[38;5;34m131,584\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_19 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m131,328\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_20 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m8\u001b[0m) │ \u001b[38;5;34m2,056\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 299,144 (1.14 MB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m299,144\u001b[0m (1.14 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 299,144 (1.14 MB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m299,144\u001b[0m (1.14 MB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 0 (0.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# model from paper\n", "# (see https://doi.org/10.1007/s11242-022-01779-3 model for the complex chemistry)\n", "model_paper = keras.Sequential(\n", " [keras.layers.Input(shape=(8,), dtype=dtype),\n", " keras.layers.Dense(128, activation='relu', dtype=dtype),\n", " keras.layers.Dense(256, activation='relu', dtype=dtype),\n", " keras.layers.Dense(512, activation='relu', dtype=dtype),\n", " keras.layers.Dense(256, activation='relu', dtype=dtype),\n", " keras.layers.Dense(8, dtype=dtype)\n", " ])\n", "\n", "model_paper.compile(optimizer=optimizer_paper, loss = loss)\n", "model_paper.summary()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Define transformer functions" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def Safelog(val):\n", " # get range of vector\n", " if val > 0:\n", " return np.log10(val)\n", " elif val < 0:\n", " return -np.log10(-val)\n", " else:\n", " return 0\n", "\n", "def Safeexp(val):\n", " if val > 0:\n", " return -10 ** -val\n", " elif val < 0:\n", " return 10 ** val\n", " else:\n", " return 0" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "# ? Why does the charge is using another logarithm than the other species\n", "\n", "func_dict_in = {\n", " \"H\" : np.log1p,\n", " \"O\" : np.log1p,\n", " \"Charge\" : Safelog,\n", " \"H_0_\" : np.log1p,\n", " \"O_0_\" : np.log1p,\n", " \"Ba\" : np.log1p,\n", " \"Cl\" : np.log1p,\n", " \"S_2_\" : np.log1p,\n", " \"S_6_\" : np.log1p,\n", " \"Sr\" : np.log1p,\n", " \"Barite\" : np.log1p,\n", " \"Celestite\" : np.log1p,\n", "}\n", "\n", "func_dict_out = {\n", " \"H\" : np.expm1,\n", " \"O\" : np.expm1,\n", " \"Charge\" : Safeexp,\n", " \"H_0_\" : np.expm1,\n", " \"O_0_\" : np.expm1,\n", " \"Ba\" : np.expm1,\n", " \"Cl\" : np.expm1,\n", " \"S_2_\" : np.expm1,\n", " \"S_6_\" : np.expm1,\n", " \"Sr\" : np.expm1,\n", " \"Barite\" : np.expm1,\n", " \"Celestite\" : np.expm1,\n", "}\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Read data from `.h5` file and convert it to a `pandas.DataFrame`" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# os.chdir('/mnt/beegfs/home/signer/projects/model-training')\n", "data_file = h5py.File(\"barite_50_4_corner.h5\")\n", "\n", "design = data_file[\"design\"]\n", "results = data_file[\"result\"]\n", "\n", "df_design = pd.DataFrame(np.array(design[\"data\"]).transpose(), columns = np.array(design[\"names\"].asstr()))\n", "df_results = pd.DataFrame(np.array(results[\"data\"]).transpose(), columns = np.array(results[\"names\"].asstr()))\n", "\n", "data_file.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Preprocess Data\n", "\n", "The data are preprocessed in the following way:\n", "\n", "1. Label data points in the `design` dataset with `reactive` and `non-reactive` labels using kmeans clustering\n", "2. Transform `design` and `results` data set into log-scaled data.\n", "3. Split data into training and test sets.\n", "4. Learn scaler on training data for `design` and `results` together (option `global`) or individual (option `individual`).\n", "5. Transform training and test data.\n", "6. Split training data into training and validation dataset." 
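Step 2 of the list above corresponds to the `func_dict_in`/`func_dict_out` pairs defined earlier in this section; the actual call lives in `preprocessing.py`, which is not shown in this notebook. The sketch below is only an illustration of how such per-column transforms could be applied and inverted, not the notebook's implementation:

```python
# Illustration only: apply the per-column forward transforms and undo them again.
# Assumes the func_dict_in / func_dict_out dictionaries and Safelog / Safeexp
# defined above; the real pipeline is implemented in preprocessing.py.
import numpy as np
import pandas as pd

demo = pd.DataFrame({
    "Ba":     [4.5e-07, 1.4e-03],
    "Charge": [-5.3e-07, -5.4e-07],
})

demo_log  = pd.DataFrame({c: demo[c].map(func_dict_in[c]) for c in demo.columns})
demo_back = pd.DataFrame({c: demo_log[c].map(func_dict_out[c]) for c in demo.columns})

# Note: Safeexp only inverts Safelog for |value| < 1, which holds for the charge column here.
print(np.allclose(demo, demo_back))  # True
```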
] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "species_columns = ['H', 'O', 'Ba', 'Cl', 'S', 'Sr', 'Barite', 'Celestite']" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/mnt/scratch/miniconda3/envs/model-training/lib/python3.12/site-packages/sklearn/base.py:1389: ConvergenceWarning: Number of distinct clusters (1) found smaller than n_clusters (2). Possibly due to duplicate points in X.\n", " return fit_method(estimator, *args, **kwargs)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Amount class 0 before: 0.9521309523809524\n", "Amount class 1 before: 0.04786904761904762\n", "Using Oversampling\n", "Amount class 0 after: 0.5\n", "Amount class 1 after: 0.5\n" ] } ], "source": [ "preprocess = preprocessing(func_dict_in=func_dict_in, func_dict_out=func_dict_out)\n", "X, y = preprocess.cluster(df_design[species_columns], df_results[species_columns])\n", "# X, y = preprocess.funcTranform(X, y)\n", "\n", "X_train, X_test, y_train, y_test = preprocess.split(X, y, ratio = 0.2)\n", "X_train, y_train = preprocess.balancer(X_train, y_train, strategy = \"over\")\n", "preprocess.scale_fit(X_train, y_train, scaling = \"individual\")\n", "X_train, X_test, y_train, y_test = preprocess.scale_transform(X_train, X_test, y_train, y_test)\n", "X_train, X_val, y_train, y_val = preprocess.split(X_train, y_train, ratio = 0.1)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
| \n", " | H | \n", "O | \n", "Charge | \n", "Ba | \n", "Cl | \n", "S_6_ | \n", "Sr | \n", "Barite | \n", "Celestite | \n", "
|---|---|---|---|---|---|---|---|---|---|
| 0 | \n", "111.012434 | \n", "55.510420 | \n", "-5.285676e-07 | \n", "4.536952e-07 | \n", "0.000022 | \n", "1.050707e-03 | \n", "0.000625 | \n", "0.001010 | \n", "1.717461 | \n", "
| 1 | \n", "111.012434 | \n", "55.507697 | \n", "-5.292985e-07 | \n", "1.091671e-06 | \n", "0.002399 | \n", "3.700427e-04 | \n", "0.001488 | \n", "0.001738 | \n", "1.716139 | \n", "
| 2 | \n", "111.012434 | \n", "55.506335 | \n", "-5.311407e-07 | \n", "6.816584e-05 | \n", "0.008922 | \n", "2.946349e-05 | \n", "0.004445 | \n", "0.004898 | \n", "1.708478 | \n", "
| 3 | \n", "111.012434 | \n", "55.506229 | \n", "-5.326179e-07 | \n", "1.435037e-03 | \n", "0.017414 | \n", "3.035681e-06 | \n", "0.007281 | \n", "0.008778 | \n", "1.698481 | \n", "
| 4 | \n", "111.012434 | \n", "55.506224 | \n", "-5.354202e-07 | \n", "3.264876e-03 | \n", "0.026235 | \n", "1.872898e-06 | \n", "0.009764 | \n", "0.012641 | \n", "1.688408 | \n", "
| ... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "
| 995 | \n", "111.012434 | \n", "55.506217 | \n", "-5.369526e-07 | \n", "6.381593e-02 | \n", "0.223770 | \n", "1.220403e-07 | \n", "0.032096 | \n", "1.714723 | \n", "0.000000 | \n", "
| 996 | \n", "111.012434 | \n", "55.506217 | \n", "-5.370535e-07 | \n", "6.386712e-02 | \n", "0.223789 | \n", "1.220029e-07 | \n", "0.032055 | \n", "1.714723 | \n", "0.000000 | \n", "
| 997 | \n", "111.012434 | \n", "55.506217 | \n", "-5.371457e-07 | \n", "6.391481e-02 | \n", "0.223807 | \n", "1.219644e-07 | \n", "0.032017 | \n", "1.714723 | \n", "0.000000 | \n", "
| 998 | \n", "111.012434 | \n", "55.506217 | \n", "-5.372196e-07 | \n", "6.395922e-02 | \n", "0.223826 | \n", "1.219672e-07 | \n", "0.031982 | \n", "1.714723 | \n", "0.000000 | \n", "
| 999 | \n", "111.012434 | \n", "55.506217 | \n", "-5.372770e-07 | \n", "6.400057e-02 | \n", "0.223844 | \n", "1.220142e-07 | \n", "0.031950 | \n", "1.714723 | \n", "0.000000 | \n", "
1000 rows × 9 columns
\n", "