Tom Rink / python · Commit 3eb8545d ("snapshot...")
Authored 2 years ago by tomrink
Parent: 3f31dde2
No related branches, tags, or merge requests.
1 changed file: modules/deeplearning/unet.py (+5 −236: 5 additions, 236 deletions)
@@ -76,38 +76,6 @@ zero_out_params = ['cld_reff_dcomp', 'cld_opd_dcomp', 'iwc_dcomp', 'lwc_dcomp']
 DO_ZERO_OUT = False

-def build_residual_block_1x1(input_layer, num_filters, activation, block_name, padding='SAME',
-                             drop_rate=0.5, do_drop_out=True, do_batch_norm=True):
-    with tf.name_scope(block_name):
-        skip = input_layer
-
-        if do_drop_out:
-            input_layer = tf.keras.layers.Dropout(drop_rate)(input_layer)
-        if do_batch_norm:
-            input_layer = tf.keras.layers.BatchNormalization()(input_layer)
-        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(input_layer)
-        print(conv.shape)
-
-        # if do_drop_out:
-        #     conv = tf.keras.layers.Dropout(drop_rate)(conv)
-        # if do_batch_norm:
-        #     conv = tf.keras.layers.BatchNormalization()(conv)
-        # conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=activation)(conv)
-        # print(conv.shape)
-
-        if do_drop_out:
-            conv = tf.keras.layers.Dropout(drop_rate)(conv)
-        if do_batch_norm:
-            conv = tf.keras.layers.BatchNormalization()(conv)
-        conv = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1, padding=padding, activation=None)(conv)
-
-        conv = conv + skip
-        conv = tf.keras.layers.LeakyReLU()(conv)
-        print(conv.shape)
-
-        return conv
-
-
 def build_conv2d_block(conv, num_filters, activation, block_name, padding='SAME'):
     with tf.name_scope(block_name):
         skip = conv
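For context on what this hunk removes: the deleted helper applied dropout, batch normalization, and a 1x1 convolution twice, then added the block input back in and finished with LeakyReLU. Below is a minimal, hedged sketch of that same pattern rebuilt with the standard tf.keras functional API; the input shape and filter count are illustrative assumptions, not values from this repository.

import tensorflow as tf

def residual_block_1x1(x, num_filters, activation=tf.nn.leaky_relu,
                       drop_rate=0.5, padding='SAME'):
    # Identity branch; num_filters must equal the channel count of x
    # so the element-wise add below is shape-compatible.
    skip = x
    x = tf.keras.layers.Dropout(drop_rate)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1,
                               padding=padding, activation=activation)(x)
    x = tf.keras.layers.Dropout(drop_rate)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Conv2D(num_filters, kernel_size=1, strides=1,
                               padding=padding, activation=None)(x)
    x = tf.keras.layers.Add()([x, skip])      # residual connection
    return tf.keras.layers.LeakyReLU()(x)

# Illustrative usage (shapes are assumptions):
inp = tf.keras.Input(shape=(16, 16, 8))
out = residual_block_1x1(inp, num_filters=8)
model = tf.keras.Model(inp, out)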
@@ -550,7 +518,7 @@ class UNET:
         self.get_evaluate_dataset(idxs)

-    def build_cnn(self):
+    def build_unet(self):
         print('build_cnn')
         # padding = "VALID"
         padding = "SAME"
@@ -665,45 +633,9 @@ class UNET:
             activation = tf.nn.softmax  # For multi-class

         # Called logits, but these are actually probabilities, see activation
-        logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability', activation=activation)(conv)
-        print(logits.shape)
-
-        return logits
-
-    def build_fcl(self, input_layer):
-        print('build fully connected layer')
-        num_filters = input_layer.shape[3]
-        drop_rate = 0.5
-        # activation = tf.nn.relu
-        # activation = tf.nn.elu
-        activation = tf.nn.leaky_relu
-        # padding = "VALID"
-        padding = "SAME"
-
-        conv = build_residual_block_1x1(input_layer, num_filters, activation, 'Residual_Block_1', padding=padding)
-        conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_2', padding=padding)
-        conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_3', padding=padding)
-        #conv = build_residual_block_1x1(conv, num_filters, activation, 'Residual_Block_4', padding=padding)
-        print(conv.shape)
-
-        if NumClasses == 2:
-            activation = tf.nn.sigmoid  # For binary
-        else:
-            activation = tf.nn.softmax  # For multi-class
-
-        # Called logits, but these are actually probabilities, see activation
-        logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability', activation=activation)(conv)
-        print(logits.shape)
-
-        self.logits = logits
+        self.logits = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding=padding, name='probability', activation=activation)(conv)
+        print(self.logits.shape)

     def build_training(self):
         if NumClasses == 2:
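The surviving lines above attach the per-pixel output head directly in build_unet: a 1x1 Conv2D named 'probability' whose activation is sigmoid in the binary case (softmax otherwise), so self.logits already holds probabilities rather than raw logits. A minimal sketch of the binary case follows; the decoder-output shape is an assumption for illustration only.

import tensorflow as tf

# Stand-in for the decoder output `conv` (shape is an assumption).
conv = tf.keras.Input(shape=(16, 16, 8))

# Binary case (NumClasses == 2 in the source): one channel with sigmoid
# activation, so every spatial position gets a probability in [0, 1].
probability = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, padding='SAME',
                                     name='probability',
                                     activation=tf.nn.sigmoid)(conv)
head = tf.keras.Model(conv, probability)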
@@ -981,11 +913,7 @@ class UNET:
             f.close()

     def build_model(self):
-        cnn = self.build_cnn()
-        print(cnn.shape, self.inputs[1].shape)
-        if self.USE_FLIGHT_ALTITUDE:
-            cnn = tf.keras.layers.concatenate([cnn, self.inputs[1]])
-        self.build_fcl(cnn)
+        self.build_unet()
         self.model = tf.keras.Model(self.inputs, self.logits)

     def restore(self, ckpt_dir):
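After this change build_model reduces to calling build_unet (which sets self.logits) and wrapping the graph in a Keras Model. The class below is a hypothetical, stripped-down stand-in for the UNET class, shown only to illustrate that inputs -> logits -> tf.keras.Model wiring; its layers and shapes are assumptions, not the repository's architecture.

import tensorflow as tf

class TinyUNETLike:
    # Hypothetical stand-in: one conv stage plus the 'probability' head.
    def __init__(self):
        self.inputs = [tf.keras.Input(shape=(16, 16, 8))]
        self.logits = None
        self.model = None

    def build_unet(self):
        x = tf.keras.layers.Conv2D(8, 3, padding='same', activation='relu')(self.inputs[0])
        self.logits = tf.keras.layers.Conv2D(1, 1, activation='sigmoid', name='probability')(x)

    def build_model(self):
        self.build_unet()
        self.model = tf.keras.Model(self.inputs, self.logits)

nn = TinyUNETLike()
nn.build_model()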
@@ -1070,165 +998,6 @@ class UNET:
         self.do_evaluate(ckpt_dir)

-
-def run_restore_static(filename_l1b, filename_l2, ckpt_dir_s_path, day_night='DAY', use_flight_altitude=False):
-    ckpt_dir_s = os.listdir(ckpt_dir_s_path)
-
-    cm_s = []
-    prob_s = []
-    labels = None
-
-    for ckpt in ckpt_dir_s:
-        ckpt_dir = ckpt_dir_s_path + ckpt
-        if not os.path.isdir(ckpt_dir):
-            continue
-        nn = IcingIntensityFCN(day_night=day_night, use_flight_altitude=use_flight_altitude)
-        nn.run_restore(filename_l1b, filename_l2, ckpt_dir)
-        cm_s.append(tf.math.confusion_matrix(nn.test_labels.flatten(), nn.test_preds.flatten()))
-        prob_s.append(nn.test_probs.flatten())
-        if labels is None:  # These should be the same
-            labels = nn.test_labels.flatten()
-
-    num = len(cm_s)
-    cm_avg = cm_s[0]
-    prob_avg = prob_s[0]
-    for k in range(num-1):
-        cm_avg += cm_s[k+1]
-        prob_avg += prob_s[k+1]
-    cm_avg /= num
-    prob_avg /= num
-
-    return labels, prob_avg, cm_avg
-
-
-def run_evaluate_static_avg(data_dct, ll, cc, ckpt_dir_s_path, day_night='DAY', flight_level=4,
-                            use_flight_altitude=False, prob_thresh=0.5, satellite='GOES16', domain='FD'):
-    num_elems = len(cc)
-    num_lines = len(ll)
-    cc = np.array(cc)
-    ll = np.array(ll)
-
-    ckpt_dir_s = os.listdir(ckpt_dir_s_path)
-
-    nav = get_navigation(satellite, domain)
-
-    prob_s = []
-
-    for ckpt in ckpt_dir_s:
-        ckpt_dir = ckpt_dir_s_path + ckpt
-        if not os.path.isdir(ckpt_dir):
-            continue
-        nn = IcingIntensityFCN(day_night=day_night, use_flight_altitude=use_flight_altitude)
-        nn.flight_level = flight_level
-        nn.setup_eval_pipeline(data_dct, num_lines * num_elems)
-        nn.build_model()
-        nn.build_training()
-        nn.build_evaluation()
-        nn.do_evaluate(ckpt_dir)
-
-        prob_s.append(nn.test_probs)
-
-    num = len(prob_s)
-    prob_avg = prob_s[0]
-    for k in range(num-1):
-        prob_avg += prob_s[k+1]
-    prob_avg /= num
-    probs = prob_avg
-
-    if NumClasses == 2:
-        preds = np.where(probs > prob_thresh, 1, 0)
-    else:
-        preds = np.argmax(probs, axis=1)
-    preds_2d = preds.reshape((num_lines, num_elems))
-
-    ll, cc = np.meshgrid(ll, cc, indexing='ij')
-    cc = cc.flatten()
-    ll = ll.flatten()
-
-    ice_mask = preds == 1
-    ice_cc = cc[ice_mask]
-    ice_ll = ll[ice_mask]
-
-    ice_lons, ice_lats = nav.lc_to_earth(ice_cc, ice_ll)
-
-    return ice_lons, ice_lats, preds_2d
-
-
-# def run_evaluate_static_fcn(data_dct, ckpt_dir_s_path, day_night='DAY', l1b_or_l2='both', prob_thresh=0.5,
-#                             flight_levels=[0, 1, 2, 3, 4], use_flight_altitude=False):
-#
-#     ckpt_dir_s = os.listdir(ckpt_dir_s_path)
-#     ckpt_dir = ckpt_dir_s_path + ckpt_dir_s[0]
-#
-#     if not use_flight_altitude:
-#         flight_levels = [0]
-#
-#     probs_dct = {flvl: None for flvl in flight_levels}
-#     preds_dct = {flvl: None for flvl in flight_levels}
-#
-#     nn = IcingIntensityFCN(day_night=day_night, l1b_or_l2=l1b_or_l2, use_flight_altitude=use_flight_altitude)
-#     nn.num_data_samples = 1
-#     nn.build_model()
-#     nn.build_training()
-#     nn.build_evaluation()
-#
-#     ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=nn.model)
-#     ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
-#     ckpt.restore(ckpt_manager.latest_checkpoint)
-#
-#     for flvl in flight_levels:
-#         nn.flight_level = flvl
-#         nn.setup_eval_pipeline(data_dct)
-#         nn.do_evaluate(ckpt_dir)
-#
-#         probs = nn.test_probs
-#         if NumClasses == 2:
-#             preds = np.where(probs > prob_thresh, 1, 0)
-#         else:
-#             preds = np.argmax(probs, axis=1)
-#
-#         probs_dct[flvl] = probs
-#         preds_dct[flvl] = preds
-#
-#     return preds_dct, probs_dct
-
-
-def run_evaluate_static_fcn(data_dct, num_tiles, ckpt_dir_s_path, day_night='DAY', l1b_or_l2='both', prob_thresh=0.5,
-                            flight_levels=[0, 1, 2, 3, 4], use_flight_altitude=False):
-    ckpt_dir_s = os.listdir(ckpt_dir_s_path)
-    ckpt_dir = ckpt_dir_s_path + ckpt_dir_s[0]
-
-    if not use_flight_altitude:
-        flight_levels = [0]
-
-    probs_dct = {flvl: None for flvl in flight_levels}
-    preds_dct = {flvl: None for flvl in flight_levels}
-
-    nn = IcingIntensityFCN(day_night=day_night, l1b_or_l2=l1b_or_l2, use_flight_altitude=use_flight_altitude)
-    nn.num_data_samples = num_tiles
-    nn.build_model()
-    nn.build_training()
-    nn.build_evaluation()
-
-    ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=nn.model)
-    ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
-    ckpt.restore(ckpt_manager.latest_checkpoint)
-
-    for flvl in flight_levels:
-        nn.flight_level = flvl
-        nn.setup_eval_pipeline(data_dct, num_tiles)
-        nn.do_evaluate(ckpt_dir)
-
-        probs = nn.test_probs
-        if NumClasses == 2:
-            preds = np.where(probs > prob_thresh, 1, 0)
-        else:
-            preds = np.argmax(probs, axis=1)
-
-        probs_dct[flvl] = probs
-        preds_dct[flvl] = preds
-
-    return preds_dct, probs_dct
-
-
 if __name__ == "__main__":
-    nn = IcingIntensityFCN()
+    nn = UNET()
     nn.run('matchup_filename')
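The helpers removed in this last hunk (run_restore_static, run_evaluate_static_avg, run_evaluate_static_fcn) shared the same post-processing: accumulate per-checkpoint probabilities, average them, then threshold in the binary case or argmax otherwise. A minimal NumPy sketch of that averaging and decision step follows; the array shapes and the use of np.mean in place of the original in-place accumulation loop are illustrative assumptions.

import numpy as np

# prob_s: one probability array per checkpoint (shapes are illustrative).
prob_s = [np.random.rand(100), np.random.rand(100), np.random.rand(100)]
prob_thresh = 0.5
num_classes = 2

# Average across checkpoints (equivalent to the removed accumulation loop).
prob_avg = np.mean(np.stack(prob_s), axis=0)

if num_classes == 2:
    preds = np.where(prob_avg > prob_thresh, 1, 0)   # binary decision
else:
    preds = np.argmax(prob_avg, axis=1)              # multi-class decision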