From 14d3f4561d1ab7bd0f962126d766277a94401380 Mon Sep 17 00:00:00 2001
From: rink <rink@ssec.wisc.edu>
Date: Mon, 31 Mar 2025 16:34:58 -0500
Subject: [PATCH] add quantile regression example module

---
 modules/deeplearning/quantile_regression.py | 127 ++++++++++++++++++++
 1 file changed, 127 insertions(+)
 create mode 100644 modules/deeplearning/quantile_regression.py

diff --git a/modules/deeplearning/quantile_regression.py b/modules/deeplearning/quantile_regression.py
new file mode 100644
index 00000000..93e0b888
--- /dev/null
+++ b/modules/deeplearning/quantile_regression.py
@@ -0,0 +1,127 @@
+import tensorflow as tf
+
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.model_selection import train_test_split
+
def bulk_quantile_loss(quantiles=(.01, .02, .05, .1, .2, .3, .4, .5, .6, .7,
                                  .8, .9, .95, .98, .99)):
    """Return a Keras loss averaging the pinball loss over many quantiles.

    A single-output model trained with this loss balances all the quantile
    levels at once (the "bulk" / Wimmers formulation) rather than fitting
    one model per quantile.

    Parameters
    ----------
    quantiles : sequence of float, optional
        Quantile levels in (0, 1). Defaults to the original fixed 15-level
        grid, so existing callers are unaffected.

    Returns
    -------
    callable
        ``loss(y_true, y_pred)`` suitable for ``model.compile(loss=...)``.
    """
    # Build the quantile vector once at closure-creation time instead of
    # reallocating a numpy array on every batch.
    q = np.asarray(quantiles)

    def loss(y_true, y_pred):
        # Cast to the prediction dtype: the numpy constants are float64
        # while Keras tensors are typically float32.
        q_t = tf.cast(tf.constant(q), y_pred.dtype)
        e = y_true - y_pred
        # Pinball loss per quantile (broadcast over the last axis), then
        # averaged across quantile levels.
        return tf.reduce_mean(tf.maximum(q_t * e, (q_t - 1) * e), axis=-1)

    return loss
+
# Pinball (quantile) loss for a single quantile level.
def quantile_loss(q):
    """Build a Keras-compatible pinball loss at quantile level *q*."""
    def loss(y_true, y_pred):
        residual = y_true - y_pred
        # Under-prediction is weighted by q, over-prediction by (1 - q).
        over = q * residual
        under = (q - 1) * residual
        return tf.reduce_mean(tf.maximum(over, under))
    return loss
+
# Generate synthetic dataset
def make_data(num_points=1000, test_size=0.2, seed=42):
    """Generate a synthetic heteroscedastic regression dataset.

    ``Y = 1 + exp(X / 4) + noise`` where the noise standard deviation grows
    linearly with ``X``, so the conditional quantiles fan out with X — a
    good stress test for quantile regression.

    Parameters
    ----------
    num_points : int, optional
        Number of samples to draw (default 1000).
    test_size : float, optional
        Fraction of samples held out for the test split (default 0.2).
    seed : int, optional
        Seed used both for sampling and for the train/test split
        (default 42, matching the original behavior).

    Returns
    -------
    tuple
        ``(X_train, X_test, Y_train, Y_test, X, Y)``.
    """
    np.random.seed(seed)
    X = np.random.rand(num_points, 1) * 10
    # Noise std grows with X (heteroscedastic).
    epsilon = np.random.normal(0, 0.5 + X / 6, size=(num_points, 1))
    Y = 1 + np.exp(X / 4) + epsilon

    # Split into training and test sets.
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=test_size, random_state=seed)
    return X_train, X_test, Y_train, Y_test, X, Y
+
# Function to create a quantile regression model
def build_quantile_model(q, hidden_units=64, num_hidden=3):
    """Build and compile an MLP trained with the pinball loss at quantile *q*.

    Parameters
    ----------
    q : float
        Quantile level in (0, 1) to fit.
    hidden_units : int, optional
        Width of each hidden layer (default 64, the original architecture).
    num_hidden : int, optional
        Number of hidden ReLU layers (default 3, the original architecture).

    Returns
    -------
    tf.keras.Model
        Compiled single-output model.
    """
    layers = [tf.keras.layers.InputLayer(shape=(1,))]
    # Stack of ReLU hidden layers (defaults match the sibling builders).
    layers.extend(tf.keras.layers.Dense(hidden_units, activation='relu')
                  for _ in range(num_hidden))
    layers.append(tf.keras.layers.Dense(1))  # single-quantile output
    model = tf.keras.models.Sequential(layers)
    model.compile(optimizer='adam', loss=quantile_loss(q))
    return model
+
def build_bulk_quantile_model():
    """Build and compile an MLP trained with the multi-quantile (bulk) loss."""
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(shape=(1,)))
    # Three 64-unit ReLU hidden layers, same architecture as the other builders.
    for _ in range(3):
        model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(1))  # one output balancing all quantiles
    model.compile(optimizer='adam', loss=bulk_quantile_loss())
    return model
+
def build_mae_model():
    """Build and compile an MLP trained with mean absolute error.

    MAE estimates the conditional median, so it serves as a baseline for
    the 0.5-quantile model.
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(shape=(1,)))
    # Same three-layer, 64-unit ReLU architecture as the other builders.
    for _ in range(3):
        model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(1))
    model.compile(optimizer='adam', loss=tf.keras.losses.MeanAbsoluteError())
    return model
+
def build_mse_model():
    """Build and compile an MLP trained with mean squared error.

    MSE estimates the conditional mean, providing a baseline to compare
    against the quantile models.
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(shape=(1,)))
    # Same three-layer, 64-unit ReLU architecture as the other builders.
    for _ in range(3):
        model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(1))
    model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())
    return model
+
def run(num_points=1000, epochs=100, batch_size=32):
    """Train per-quantile, MAE, MSE, and bulk-quantile models on synthetic
    data and plot their fitted curves against the held-out test set.

    Parameters
    ----------
    num_points : int, optional
        Size of the synthetic dataset (default 1000).
    epochs : int, optional
        Training epochs for every model (default 100, the original value).
    batch_size : int, optional
        Minibatch size for every model (default 32, the original value).
    """
    # Quantile levels fitted individually (one model each).
    quantiles = [0.1, 0.5, 0.9]
    models = {}

    X_train, X_test, Y_train, Y_test, X, Y = make_data(num_points=num_points)
    print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)

    # Train a dedicated model per quantile level.
    for q in quantiles:
        print(f"Training quantile {q} model...")
        models[q] = build_quantile_model(q)
        models[q].fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,
                      verbose=0)

    # Evaluate every model on a dense grid spanning the observed X range.
    X_range = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    predictions = {q: models[q].predict(X_range) for q in quantiles}

    # Baselines: MAE approximates the conditional median, MSE the mean.
    model = build_mae_model()
    print("Training MAE model...")
    model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
    mae_predictions = model.predict(X_range)

    model = build_mse_model()
    print("Training MSE model...")
    model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
    mse_predictions = model.predict(X_range)

    model = build_bulk_quantile_model()
    print("Training bulk quantile model...")
    model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
    bulk_predictions = model.predict(X_range)

    # Plot the quantile curves and baselines over the test scatter.
    plt.figure(figsize=(8, 6))
    plt.scatter(X_test, Y_test, alpha=0.3, label="Test Data")
    quantile_colors = {0.1: 'red', 0.5: 'green', 0.9: 'blue'}
    for q in quantiles:
        label = f"Quantile {q}" + (" (Median)" if q == 0.5 else "")
        plt.plot(X_range, predictions[q], label=label,
                 color=quantile_colors.get(q))
    plt.plot(X_range, mae_predictions, label="MAE", color='magenta')
    plt.plot(X_range, mse_predictions, label="MSE", color='cyan')
    plt.plot(X_range, bulk_predictions,
             label="Bulk Quantile Model (Wimmers)", color='black')
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.legend()
    plt.title("Quantile Regression")
    plt.show()
-- 
GitLab