Commit f87a4bcd authored by tomrink

snapshot...

parent c1a98186
@@ -3,13 +3,13 @@ import contextlib
import tensorflow as tf
from deeplearning.cloud_fraction_fcn_abi import get_label_data_5cat
from util.plot_cm import confusion_matrix_values
from util.augment import augment_image
from util.setup_cloud_fraction import logdir, modeldir, now, ancillary_path
from util.util import EarlyStop, normalize, denormalize, scale, scale2, descale, \
get_grid_values_all, make_tf_callable_generator
import glob
import os, datetime
import os
import datetime
import numpy as np
import pickle
import h5py
@@ -31,9 +31,10 @@ else:
NumLogits = NumClasses
BATCH_SIZE = 128
NUM_EPOCHS = 80
NUM_EPOCHS = 100
EARLY_STOP = True
PATIENCE = 7
NOISE_TRAINING = False
NOISE_STDDEV = 0.01
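# Training setup for this snapshot: NUM_EPOCHS raised from 80 to 100, early stopping
# enabled with a patience of 7 epochs, and Gaussian input noise (stddev 0.01) left disabled.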
@@ -71,8 +72,7 @@ label_param = 'cld_opd_dcomp'
params = ['temp_11_0um_nom', 'refl_0_65um_nom', 'refl_submin_ch01', 'refl_submax_ch01', 'refl_substddev_ch01', 'cloud_probability', label_param]
params_i = ['temp_11_0um_nom', 'refl_0_65um_nom', 'cloud_probability', label_param]
data_params_half = ['temp_11_0um_nom']
# data_params_half = ['temp_11_0um_nom', 'refl_0_65um_nom']
data_params_half = ['temp_11_0um_nom', 'refl_0_65um_nom']
sub_fields = ['refl_submin_ch01', 'refl_submax_ch01', 'refl_substddev_ch01']
data_params_full = ['refl_0_65um_nom']
@@ -145,19 +145,17 @@ def upsample_mean(grd):
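# get_grid_cell_mean: the 16 strided views grd_k[:, i::4, j::4] each pick one pixel of
# every 4x4 block, so their mean collapses the full-resolution grid to one value per
# low-resolution cell.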
def get_grid_cell_mean(grd_k):
grd_k = np.where(np.isnan(grd_k), 0, grd_k)
mean = np.nanmean([grd_k[:, 0::4, 0::4], grd_k[:, 1::4, 0::4], grd_k[:, 2::4, 0::4], grd_k[:, 3::4, 0::4],
grd_k[:, 0::4, 1::4], grd_k[:, 1::4, 1::4], grd_k[:, 2::4, 1::4], grd_k[:, 3::4, 1::4],
grd_k[:, 0::4, 2::4], grd_k[:, 1::4, 2::4], grd_k[:, 2::4, 2::4], grd_k[:, 3::4, 2::4],
grd_k[:, 0::4, 3::4], grd_k[:, 1::4, 3::4], grd_k[:, 2::4, 3::4], grd_k[:, 3::4, 3::4]], axis=0)
# For an all-NaN slice
mean = np.where(np.isnan(mean), 0, mean)
return mean
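# Editorial sketch (not part of the commit): for grids whose height and width are
# multiples of 4, an equivalent block mean can be written with a reshape:
#   n, h, w = grd_k.shape
#   block_mean = grd_k.reshape(n, h // 4, 4, w // 4, 4).mean(axis=(2, 4))
# get_min_max_std below applies the same 16-view pattern to return the per-block
# minimum, maximum, standard deviation, and mean.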
def get_min_max_std(grd_k):
grd_k = np.where(np.isnan(grd_k), 0, grd_k)
lo = np.nanmin([grd_k[:, 0::4, 0::4], grd_k[:, 1::4, 0::4], grd_k[:, 2::4, 0::4], grd_k[:, 3::4, 0::4],
grd_k[:, 0::4, 1::4], grd_k[:, 1::4, 1::4], grd_k[:, 2::4, 1::4], grd_k[:, 3::4, 1::4],
grd_k[:, 0::4, 2::4], grd_k[:, 1::4, 2::4], grd_k[:, 2::4, 2::4], grd_k[:, 3::4, 2::4],
@@ -177,14 +175,19 @@ def get_min_max_std(grd_k):
grd_k[:, 0::4, 1::4], grd_k[:, 1::4, 1::4], grd_k[:, 2::4, 1::4], grd_k[:, 3::4, 1::4],
grd_k[:, 0::4, 2::4], grd_k[:, 1::4, 2::4], grd_k[:, 2::4, 2::4], grd_k[:, 3::4, 2::4],
grd_k[:, 0::4, 3::4], grd_k[:, 1::4, 3::4], grd_k[:, 2::4, 3::4], grd_k[:, 3::4, 3::4]], axis=0)
# For an all-NaN slice
lo = np.where(np.isnan(lo), 0, lo)
hi = np.where(np.isnan(hi), 0, hi)
std = np.where(np.isnan(std), 0, std)
avg = np.where(np.isnan(avg), 0, avg)
return lo, hi, std, avg
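# get_cldy_frac_opd: pixels with cloud_probability >= 0.5 are flagged cloudy, and the
# strided slices sum the cloudy-pixel count for each 4x4 block (the tail of the sum and
# the rest of the function fall outside this hunk).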
def get_cldy_frac_opd(cld_prob, opd):
cld_prob = np.where(np.isnan(cld_prob), 0.0, cld_prob)
opd = np.where(np.isnan(opd), 0.0, opd)
cld = np.where(cld_prob < 0.5, 0, 1)
cnt_cld = cld[:, 0::4, 0::4] + cld[:, 1::4, 0::4] + cld[:, 2::4, 0::4] + cld[:, 3::4, 0::4] + \
cld[:, 0::4, 1::4] + cld[:, 1::4, 1::4] + cld[:, 2::4, 1::4] + cld[:, 3::4, 1::4] + \
@@ -296,7 +299,7 @@ class SRCNN:
# self.n_chans = len(data_params_half) + len(data_params_full) + 1
self.n_chans = 6
self.X_img = tf.keras.Input(shape=(None, None, self.n_chans + 1))
self.X_img = tf.keras.Input(shape=(None, None, self.n_chans + 2))
self.inputs.append(self.X_img)
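# The input placeholder gains one channel (n_chans + 1 -> n_chans + 2); from this diff it
# appears to carry the per-cell cloud-probability standard deviation appended later
# (see data_norm.append(cp_std) below).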
@@ -323,14 +326,32 @@ class SRCNN:
input_data = np.concatenate(data_s)
input_label = np.concatenate(label_s)
# refl_i = input_label[:, params_i.index('refl_0_65um_nom'), :, :]
# rlo, rhi, rstd, rmean = get_min_max_std(refl_i)
# rmean = rmean[:, slc_y, slc_x]
# rmean = scale2(rmean, -2.0, 120.0)
# rlo = rlo[:, slc_y, slc_x]
# rlo = scale2(rlo, -2.0, 120.0)
# rhi = rhi[:, slc_y, slc_x]
# rhi = scale2(rhi, -2.0, 120.0)
# refl_rng = rhi - rlo
# rstd = rstd[:, slc_y, slc_x]
# rstd = scale2(rstd, 0.0, 20.0)
data_norm = []
for param in data_params_half:
idx = params.index(param)
tmp = input_data[:, idx, :, :]
tmp = tmp[:, slc_y, slc_x]
# tmp = normalize(tmp, param, mean_std_dct)
tmp = scale(tmp, param, mean_std_dct)
data_norm.append(tmp)
# for param in data_params_half:
# idx = params.index(param)
# tmp = input_data[:, idx, :, :]
# tmp = tmp[:, slc_y, slc_x]
# # tmp = normalize(tmp, param, mean_std_dct)
# tmp = scale(tmp, param, mean_std_dct)
# data_norm.append(tmp)
bt = input_data[:, params.index('temp_11_0um_nom'), :, :]
bt = bt[:, slc_y, slc_x]
# bt = normalize(bt, 'temp_11_0um_nom', mean_std_dct)
bt = scale(bt, 'temp_11_0um_nom', mean_std_dct)
data_norm.append(bt)
tmp = input_label[:, params_i.index('cloud_probability'), :, :]
cld_prob = tmp.copy()
@@ -338,6 +359,12 @@ class SRCNN:
tmp = tmp[:, slc_y, slc_x]
data_norm.append(tmp)
refl = input_data[:, params.index('refl_0_65um_nom'), :, :]
refl = refl[:, slc_y, slc_x]
# refl = normalize(refl, 'refl_0_65um_nom', mean_std_dct)
refl = scale(refl, 'refl_0_65um_nom', mean_std_dct)
data_norm.append(refl)
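# Input channels assembled so far in this hunk: scaled 11 um brightness temperature,
# cloud probability, and scaled 0.65 um reflectance.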
# for param in sub_fields:
# idx = params.index(param)
# tmp = input_data[:, idx, :, :]
@@ -350,23 +377,21 @@ class SRCNN:
# tmp = scale2(tmp, 0.0, 20.0)
# data_norm.append(tmp)
idx = params.index(sub_fields[0])
tmp = input_data[:, idx, :, :]
tmp = tmp[:, slc_y, slc_x]
rlo = scale(tmp, 'refl_0_65um_nom', mean_std_dct)
data_norm.append(rlo)
refl_lo = input_data[:, params.index(sub_fields[0]), :, :]
refl_lo = refl_lo[:, slc_y, slc_x]
refl_lo = scale2(refl_lo, -2.0, 120.0)
idx = params.index(sub_fields[1])
tmp = input_data[:, idx, :, :]
tmp = tmp[:, slc_y, slc_x]
tmp = scale(tmp, 'refl_0_65um_nom', mean_std_dct)
data_norm.append(tmp - rlo)
refl_hi = input_data[:, params.index(sub_fields[1]), :, :]
refl_hi = refl_hi[:, slc_y, slc_x]
refl_hi = scale2(refl_hi, -2.0, 120.0)
idx = params.index(sub_fields[2])
tmp = input_data[:, idx, :, :]
tmp = tmp[:, slc_y, slc_x]
tmp = scale2(tmp, 0.0, 20.0)
data_norm.append(tmp)
refl_rng = refl_hi - refl_lo
data_norm.append(refl_rng)
refl_std = input_data[:, params.index(sub_fields[2]), :, :]
refl_std = refl_std[:, slc_y, slc_x]
refl_std = scale2(refl_std, 0.0, 30.0)
data_norm.append(refl_std)
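# Sub-pixel reflectance statistics: the scaled sub-pixel min and max are combined into a
# range channel (refl_hi - refl_lo), and the scaled sub-pixel standard deviation is
# appended as its own channel.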
tmp = input_label[:, label_idx_i, :, :]
tmp = get_grid_cell_mean(tmp)
@@ -383,16 +408,16 @@ class SRCNN:
label = label[:, y_64, x_64]
cld_prob = cld_prob[:, y_64, x_64]
cat_cf = get_label_data_5cat(cld_prob)
_, _, cp_std, _ = get_min_max_std(cld_prob)
if KERNEL_SIZE != 1:
cat_cf = np.pad(cat_cf, pad_width=[(0, 0), (1, 1), (1, 1)])
cp_std = np.pad(cp_std, pad_width=[(0, 0), (1, 1), (1, 1)])
data_norm.append(cat_cf)
data_norm.append(cp_std)
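# Two auxiliary channels: the 5-category cloud-fraction label derived from cloud
# probability, and the per-cell standard deviation of cloud probability; both are padded
# by one pixel on each side when KERNEL_SIZE != 1.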
data = np.stack(data_norm, axis=3)
data = data.astype(np.float32)
label = get_cldy_frac_opd(cld_prob, label)
# label = scale(label, label_param, mean_std_dct)
label = np.where(np.isnan(label), 0, label)
label = np.expand_dims(label, axis=3)
@@ -540,7 +565,7 @@ class SRCNN:
initial_learning_rate = 0.001
decay_rate = 0.95
steps_per_epoch = int(self.num_data_samples/BATCH_SIZE) # one epoch
decay_steps = int(steps_per_epoch) * 1
decay_steps = int(steps_per_epoch) * 2
print('initial rate, decay rate, steps/epoch, decay steps: ', initial_learning_rate, decay_rate, steps_per_epoch, decay_steps)
self.learningRateSchedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps, decay_rate)
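# With decay_steps = 2 * steps_per_epoch, decay_rate = 0.95, and staircase left at its
# default (False), the learning rate falls smoothly by about 5% every two epochs:
# roughly 0.00095 after epoch 2 and 0.00077 after epoch 10.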
@@ -640,7 +665,7 @@ class SRCNN:
best_test_loss = np.finfo(dtype=np.float64).max
if EARLY_STOP:
es = EarlyStop()
es = EarlyStop(patience=PATIENCE)
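# Early stopping now uses the module-level PATIENCE (7 epochs) rather than the EarlyStop default.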
for epoch in range(NUM_EPOCHS):
self.train_loss.reset_states()
@@ -962,7 +987,8 @@ def run_restore_static(directory, ckpt_dir, out_file=None):
descale(inputs[:, 1:y_hi, 1:x_hi, 3], 'refl_0_65um_nom', mean_std_dct),
inputs[:, 1:y_hi, 1:x_hi, 4],
descale(inputs[:, 1:y_hi, 1:x_hi, 5], label_param, mean_std_dct),
inputs[:, 1:y_hi, 1:x_hi, 6]])
inputs[:, 1:y_hi, 1:x_hi, 6],
inputs[:, 1:y_hi, 1:x_hi, 7]])
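# An eighth input channel (index 7) is written out here, presumably matching the extra
# channel added to X_img above; like channels 4 and 6 it is saved without descaling.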
def run_evaluate_static(in_file, out_file, ckpt_dir):
@@ -1138,226 +1164,61 @@ def run_evaluate_static_(bt, refl, refl_lo, refl_hi, refl_std, cp, ckpt_dir):
return cld_frac
def analyze_3cat(file):
tup = np.load(file, allow_pickle=True)
lbls = tup[0]
pred = tup[1]
lbls = lbls.flatten()
pred = pred.flatten()
print(np.sum(lbls == 0), np.sum(lbls == 1), np.sum(lbls == 2))
msk_0_1 = lbls != 2
msk_1_2 = lbls != 0
msk_0_2 = lbls != 1
lbls_0_1 = lbls[msk_0_1]
pred_0_1 = pred[msk_0_1]
pred_0_1 = np.where(pred_0_1 == 2, 1, pred_0_1)
# ----
lbls_1_2 = lbls[msk_1_2]
lbls_1_2 = np.where(lbls_1_2 == 1, 0, lbls_1_2)
lbls_1_2 = np.where(lbls_1_2 == 2, 1, lbls_1_2)
pred_1_2 = pred[msk_1_2]
pred_1_2 = np.where(pred_1_2 == 0, -9, pred_1_2)
pred_1_2 = np.where(pred_1_2 == 1, 0, pred_1_2)
pred_1_2 = np.where(pred_1_2 == 2, 1, pred_1_2)
pred_1_2 = np.where(pred_1_2 == -9, 1, pred_1_2)
# ----
lbls_0_2 = lbls[msk_0_2]
lbls_0_2 = np.where(lbls_0_2 == 2, 1, lbls_0_2)
pred_0_2 = pred[msk_0_2]
pred_0_2 = np.where(pred_0_2 == 2, 1, pred_0_2)
cm_0_1 = confusion_matrix_values(lbls_0_1, pred_0_1)
cm_1_2 = confusion_matrix_values(lbls_1_2, pred_1_2)
cm_0_2 = confusion_matrix_values(lbls_0_2, pred_0_2)
true_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 0)
false_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 0)
true_no_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 1)
false_no_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 1)
true_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 0)
false_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 0)
true_no_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 1)
false_no_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 1)
true_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 0)
false_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 0)
true_no_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 1)
false_no_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 1)
tp_0 = np.sum(true_0_1).astype(np.float64)
tp_1 = np.sum(true_1_2).astype(np.float64)
tp_2 = np.sum(true_0_2).astype(np.float64)
tn_0 = np.sum(true_no_0_1).astype(np.float64)
tn_1 = np.sum(true_no_1_2).astype(np.float64)
tn_2 = np.sum(true_no_0_2).astype(np.float64)
fp_0 = np.sum(false_0_1).astype(np.float64)
fp_1 = np.sum(false_1_2).astype(np.float64)
fp_2 = np.sum(false_0_2).astype(np.float64)
fn_0 = np.sum(false_no_0_1).astype(np.float64)
fn_1 = np.sum(false_no_1_2).astype(np.float64)
fn_2 = np.sum(false_no_0_2).astype(np.float64)
recall_0 = tp_0 / (tp_0 + fn_0)
recall_1 = tp_1 / (tp_1 + fn_1)
recall_2 = tp_2 / (tp_2 + fn_2)
precision_0 = tp_0 / (tp_0 + fp_0)
precision_1 = tp_1 / (tp_1 + fp_1)
precision_2 = tp_2 / (tp_2 + fp_2)
mcc_0 = ((tp_0 * tn_0) - (fp_0 * fn_0)) / np.sqrt((tp_0 + fp_0) * (tp_0 + fn_0) * (tn_0 + fp_0) * (tn_0 + fn_0))
mcc_1 = ((tp_1 * tn_1) - (fp_1 * fn_1)) / np.sqrt((tp_1 + fp_1) * (tp_1 + fn_1) * (tn_1 + fp_1) * (tn_1 + fn_1))
mcc_2 = ((tp_2 * tn_2) - (fp_2 * fn_2)) / np.sqrt((tp_2 + fp_2) * (tp_2 + fn_2) * (tn_2 + fp_2) * (tn_2 + fn_2))
acc_0 = np.sum(lbls_0_1 == pred_0_1)/pred_0_1.size
acc_1 = np.sum(lbls_1_2 == pred_1_2)/pred_1_2.size
acc_2 = np.sum(lbls_0_2 == pred_0_2)/pred_0_2.size
print(acc_0, recall_0, precision_0, mcc_0)
print(acc_1, recall_1, precision_1, mcc_1)
print(acc_2, recall_2, precision_2, mcc_2)
return cm_0_1, cm_1_2, cm_0_2, [acc_0, acc_1, acc_2], [recall_0, recall_1, recall_2],\
[precision_0, precision_1, precision_2], [mcc_0, mcc_1, mcc_2]
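# Editorial note: recall, precision, and the Matthews correlation coefficient above follow
# the standard binary definitions for each one-vs-one class pairing; if scikit-learn is
# available, sklearn.metrics.matthews_corrcoef(lbls_0_1, pred_0_1) should give the same MCC.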
def analyze_5cat(file):
tup = np.load(file, allow_pickle=True)
lbls = tup[0]
pred = tup[1]
lbls = lbls.flatten()
pred = pred.flatten()
np.histogram(lbls, bins=5)
np.histogram(pred, bins=5)
new_lbls = np.zeros(lbls.size, dtype=np.int32)
new_pred = np.zeros(pred.size, dtype=np.int32)
new_lbls[lbls == 0] = 0
new_lbls[lbls == 1] = 1
new_lbls[lbls == 2] = 1
new_lbls[lbls == 3] = 1
new_lbls[lbls == 4] = 2
new_pred[pred == 0] = 0
new_pred[pred == 1] = 1
new_pred[pred == 2] = 1
new_pred[pred == 3] = 1
new_pred[pred == 4] = 2
np.histogram(new_lbls, bins=3)
np.histogram(new_pred, bins=3)
lbls = new_lbls
pred = new_pred
print(np.sum(lbls == 0), np.sum(lbls == 1), np.sum(lbls == 2))
msk_0_1 = lbls != 2
msk_1_2 = lbls != 0
msk_0_2 = lbls != 1
lbls_0_1 = lbls[msk_0_1]
pred_0_1 = pred[msk_0_1]
pred_0_1 = np.where(pred_0_1 == 2, 1, pred_0_1)
# ----------------------------------------------
lbls_1_2 = lbls[msk_1_2]
lbls_1_2 = np.where(lbls_1_2 == 1, 0, lbls_1_2)
lbls_1_2 = np.where(lbls_1_2 == 2, 1, lbls_1_2)
pred_1_2 = pred[msk_1_2]
pred_1_2 = np.where(pred_1_2 == 0, -9, pred_1_2)
pred_1_2 = np.where(pred_1_2 == 1, 0, pred_1_2)
pred_1_2 = np.where(pred_1_2 == 2, 1, pred_1_2)
pred_1_2 = np.where(pred_1_2 == -9, 1, pred_1_2)
# -----------------------------------------------
lbls_0_2 = lbls[msk_0_2]
lbls_0_2 = np.where(lbls_0_2 == 2, 1, lbls_0_2)
pred_0_2 = pred[msk_0_2]
pred_0_2 = np.where(pred_0_2 == 2, 1, pred_0_2)
cm_0_1 = confusion_matrix_values(lbls_0_1, pred_0_1)
cm_1_2 = confusion_matrix_values(lbls_1_2, pred_1_2)
cm_0_2 = confusion_matrix_values(lbls_0_2, pred_0_2)
true_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 0)
false_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 0)
true_no_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 1)
false_no_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 1)
true_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 0)
false_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 0)
true_no_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 1)
false_no_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 1)
true_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 0)
false_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 0)
true_no_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 1)
false_no_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 1)
tp_0 = np.sum(true_0_1).astype(np.float64)
tp_1 = np.sum(true_1_2).astype(np.float64)
tp_2 = np.sum(true_0_2).astype(np.float64)
tn_0 = np.sum(true_no_0_1).astype(np.float64)
tn_1 = np.sum(true_no_1_2).astype(np.float64)
tn_2 = np.sum(true_no_0_2).astype(np.float64)
fp_0 = np.sum(false_0_1).astype(np.float64)
fp_1 = np.sum(false_1_2).astype(np.float64)
fp_2 = np.sum(false_0_2).astype(np.float64)
fn_0 = np.sum(false_no_0_1).astype(np.float64)
fn_1 = np.sum(false_no_1_2).astype(np.float64)
fn_2 = np.sum(false_no_0_2).astype(np.float64)
recall_0 = tp_0 / (tp_0 + fn_0)
recall_1 = tp_1 / (tp_1 + fn_1)
recall_2 = tp_2 / (tp_2 + fn_2)
precision_0 = tp_0 / (tp_0 + fp_0)
precision_1 = tp_1 / (tp_1 + fp_1)
precision_2 = tp_2 / (tp_2 + fp_2)
mcc_0 = ((tp_0 * tn_0) - (fp_0 * fn_0)) / np.sqrt((tp_0 + fp_0) * (tp_0 + fn_0) * (tn_0 + fp_0) * (tn_0 + fn_0))
mcc_1 = ((tp_1 * tn_1) - (fp_1 * fn_1)) / np.sqrt((tp_1 + fp_1) * (tp_1 + fn_1) * (tn_1 + fp_1) * (tn_1 + fn_1))
mcc_2 = ((tp_2 * tn_2) - (fp_2 * fn_2)) / np.sqrt((tp_2 + fp_2) * (tp_2 + fn_2) * (tn_2 + fp_2) * (tn_2 + fn_2))
acc_0 = np.sum(lbls_0_1 == pred_0_1)/pred_0_1.size
acc_1 = np.sum(lbls_1_2 == pred_1_2)/pred_1_2.size
acc_2 = np.sum(lbls_0_2 == pred_0_2)/pred_0_2.size
print(acc_0, recall_0, precision_0, mcc_0)
print(acc_1, recall_1, precision_1, mcc_1)
print(acc_2, recall_2, precision_2, mcc_2)
return cm_0_1, cm_1_2, cm_0_2, [acc_0, acc_1, acc_2], [recall_0, recall_1, recall_2],\
[precision_0, precision_1, precision_2], [mcc_0, mcc_1, mcc_2], lbls, pred
def analyze(directory, outfile):
train_data_files = glob.glob(directory + 'train*mres*.npy')
valid_data_files = glob.glob(directory + 'valid*mres*.npy')
train_label_files = glob.glob(directory + 'train*ires*.npy')
valid_label_files = glob.glob(directory + 'valid*ires*.npy')
data_s = []
label_s = []
for idx, data_f in enumerate(valid_data_files):
nda = np.load(data_f)
data_s.append(nda)
f = valid_label_files[idx]
nda = np.load(f)
label_s.append(nda)
input_data = np.concatenate(data_s)
input_label = np.concatenate(label_s)
refl_i = input_label[:, params_i.index('refl_0_65um_nom'), :, :]
rlo, rhi, rstd, rmean = get_min_max_std(refl_i)
rmean_i = rmean[:, slc_y, slc_x]
rlo_i = rlo[:, slc_y, slc_x]
rhi_i = rhi[:, slc_y, slc_x]
rstd_i = rstd[:, slc_y, slc_x]
rlo_m = input_data[:, params.index('refl_submin_ch01'), :, :]
rlo_m = rlo_m[:, slc_y, slc_x]
rhi_m = input_data[:, params.index('refl_submax_ch01'), :, :]
rhi_m = rhi_m[:, slc_y, slc_x]
rstd_m = input_data[:, params.index('refl_substddev_ch01'), :, :]
rstd_m = rstd_m[:, slc_y, slc_x]
rmean = input_data[:, params.index('refl_0_65um_nom'), :, :]
rmean_m = rmean[:, slc_y, slc_x]
# ------------------------
cp_i = input_label[:, params_i.index('cloud_probability'), :, :]
_, _, _, mean = get_min_max_std(cp_i)
cp_mean_i = mean[:, slc_y, slc_x]
mean = input_data[:, params.index('cloud_probability'), :, :]
cp_mean_m = mean[:, slc_y, slc_x]
# -----------------------------
opd_i = input_label[:, params_i.index('cld_opd_dcomp'), :, :]
_, _, _, mean = get_min_max_std(opd_i)
opd_mean_i = mean[:, slc_y, slc_x]
mean = input_data[:, params.index('cld_opd_dcomp'), :, :]
opd_mean_m = mean[:, slc_y, slc_x]
np.save(outfile, (rmean_i, rmean_m, cp_mean_i, cp_mean_m, opd_mean_i, opd_mean_m, rlo_i, rlo_m, rhi_i, rhi_m, rstd_i, rstd_m))
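# The new analyze routine collects matched i-res (label) and m-res (data) cell statistics
# for 0.65 um reflectance, cloud probability, and cloud optical depth, and saves them
# together in one file.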
if __name__ == "__main__":