Tom Rink / python · Commits · 14d3f456

Commit 14d3f456, authored 1 month ago by rink

add option to pass in absolute tolerance

Parent: 919948c6

Changes: 1 changed file
modules/deeplearning/quantile_regression.py (new file, 0 → 100644): 127 additions, 0 deletions
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
def bulk_quantile_loss():
    def loss(y_true, y_pred):
        q = np.array([.01, .02, .05, .1, .2, .3, .4, .5, .6, .7, .8, .9, .95, .98, .99])
        e = y_true - y_pred
        return tf.reduce_mean(tf.maximum(q * e, (q - 1) * e), axis=-1)
    return loss
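bulk_quantile_loss scores a single-output prediction against all 15 quantile levels at once: the error e has shape (batch, 1), the level array q has shape (15,), so q * e broadcasts to (batch, 15) and the pinball losses are averaged over the last axis. A minimal sanity check of that broadcasting (not part of the committed file; assumes TensorFlow 2.x eager execution):

loss_fn = bulk_quantile_loss()
y_true = tf.constant([[2.0], [3.0]])   # shape (batch, 1)
y_pred = tf.constant([[1.5], [3.5]])   # single-output prediction, shape (batch, 1)
per_sample = loss_fn(y_true, y_pred)   # q * e broadcasts to (batch, 15), mean over axis=-1
print(per_sample.shape)                # (2,)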
# Define the custom quantile loss function
def quantile_loss(q):
    def loss(y_true, y_pred):
        e = y_true - y_pred
        return tf.reduce_mean(tf.maximum(q * e, (q - 1) * e))
    return loss
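The pinball loss is asymmetric: for level q, an under-prediction of size |e| costs q·|e| while an over-prediction of the same size costs (1 − q)·|e|, which is what pushes the q = 0.9 model toward the upper edge of the noise. A quick numeric check, again illustrative only:

l90 = quantile_loss(0.9)
print(float(l90(tf.constant([[1.0]]), tf.constant([[0.0]]))))  # under-prediction, e = +1 -> ~0.9
print(float(l90(tf.constant([[0.0]]), tf.constant([[1.0]]))))  # over-prediction,  e = -1 -> ~0.1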
# Generate synthetic dataset
def make_data(num_points=1000):
    np.random.seed(42)
    X = np.random.rand(num_points, 1) * 10
    # epsilon = np.random.normal(0, X/2, size=(num_points, 1))  # Noise increasing with X
    epsilon = np.random.normal(0, 0.5 + X / 6, size=(num_points, 1))
    # Y = 2 + 1.5 * X + epsilon  # Linear relationship with variance increasing
    Y = 1 + np.exp(X / 4) + epsilon

    # Split into training and test sets
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

    return X_train, X_test, Y_train, Y_test, X, Y
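The synthetic data is heteroscedastic: the noise standard deviation 0.5 + X/6 grows with X, so the conditional quantiles of Y = 1 + exp(X/4) + epsilon fan out as X increases rather than staying parallel. With the default num_points=1000 and the 80/20 split, the returned arrays look like this (illustrative check, not in the diff):

X_train, X_test, Y_train, Y_test, X, Y = make_data()
print(X_train.shape, X_test.shape)   # (800, 1) (200, 1)
print(Y_train.shape, Y_test.shape)   # (800, 1) (200, 1)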
# Function to create a quantile regression model
def build_quantile_model(q):
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(shape=(1,)),
        tf.keras.layers.Dense(64, activation='relu'),  # Hidden layer
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1)  # Output layer
    ])
    model.compile(optimizer='adam', loss=quantile_loss(q))
    return model
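Each quantile level gets its own small fully connected network (three hidden layers of 64 ReLU units, one linear output) compiled with the corresponding pinball loss; the InputLayer(shape=...) keyword follows recent Keras (3.x) conventions. A standalone usage sketch, mirroring what run() does for a single level (variable names here are hypothetical):

X_train, X_test, Y_train, Y_test, X, Y = make_data()
m90 = build_quantile_model(0.9)
m90.fit(X_train, Y_train, epochs=100, batch_size=32, verbose=0)
print(m90.predict(X_test[:5]))   # upper-decile estimates for the first five test points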
def build_bulk_quantile_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(shape=(1,)),
        tf.keras.layers.Dense(64, activation='relu'),  # Hidden layer
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1)  # Output layer
    ])
    model.compile(optimizer='adam', loss=bulk_quantile_loss())
    return model
def build_mae_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(shape=(1,)),
        tf.keras.layers.Dense(64, activation='relu'),  # Hidden layer
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1)  # Output layer
    ])
    # model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())
    model.compile(optimizer='adam', loss=tf.keras.losses.MeanAbsoluteError())
    return model
def build_mse_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(shape=(1,)),
        tf.keras.layers.Dense(64, activation='relu'),  # Hidden layer
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1)  # Output layer
    ])
    model.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())
    return model
def run(num_points=1000):
    # Define quantiles
    quantiles = [0.1, 0.5, 0.9]
    models = {}

    X_train, X_test, Y_train, Y_test, X, Y = make_data(num_points=num_points)
    print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)

    # Train a model for each quantile
    for q in quantiles:
        print(f"Training quantile {q} model...")
        models[q] = build_quantile_model(q)
        models[q].fit(X_train, Y_train, epochs=100, batch_size=32, verbose=0)

    # Generate test data predictions
    X_range = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
    predictions = {q: models[q].predict(X_range) for q in quantiles}

    model = build_mae_model()
    print(f"Training MAE model...")
    model.fit(X_train, Y_train, epochs=100, batch_size=32, verbose=0)
    mae_predictions = model.predict(X_range)

    model = build_mse_model()
    print(f"Training MSE model...")
    model.fit(X_train, Y_train, epochs=100, batch_size=32, verbose=0)
    mse_predictions = model.predict(X_range)

    model = build_bulk_quantile_model()
    print(f"Training bulk quantile model...")
    model.fit(X_train, Y_train, epochs=100, batch_size=32, verbose=0)
    bulk_predictions = model.predict(X_range)

    # Plot the results
    plt.figure(figsize=(8, 6))
    plt.scatter(X_test, Y_test, alpha=0.3, label="Test Data")
    plt.plot(X_range, predictions[0.1], label="Quantile 0.1", color='red')
    plt.plot(X_range, predictions[0.5], label="Quantile 0.5 (Median)", color='green')
    plt.plot(X_range, predictions[0.9], label="Quantile 0.9", color='blue')
    plt.plot(X_range, mae_predictions, label="MAE", color='magenta')
    plt.plot(X_range, mse_predictions, label="MSE", color='cyan')
    plt.plot(X_range, bulk_predictions, label="Bulk Quantile Model (Wimmers)", color='black')
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.legend()
    plt.title("Quantile Regression")
    plt.show()
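The file defines run() but never calls it; a hypothetical entry point (not part of the commit) for running the comparison as a script would be:

if __name__ == '__main__':
    run(num_points=1000)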