    # cloud_fraction_fcn_viirs.py
    import glob
    import tensorflow as tf
    
    from util.plot_cm import confusion_matrix_values
    from util.setup import logdir, modeldir, now, ancillary_path
    from util.util import EarlyStop, normalize, denormalize, get_grid_values_all
    import os, datetime
    import numpy as np
    import pickle
    import h5py
    import xarray as xr
    import gc
    
    AUTOTUNE = tf.data.AUTOTUNE
    
    LOG_DEVICE_PLACEMENT = False
    
    PROC_BATCH_SIZE = 4
    PROC_BATCH_BUFFER_SIZE = 5000
    
    NumClasses = 5
    if NumClasses == 2:
        NumLogits = 1
    else:
        NumLogits = NumClasses
    
    BATCH_SIZE = 128
    NUM_EPOCHS = 80
    
    TRACK_MOVING_AVERAGE = False
    EARLY_STOP = True
    
    NOISE_TRAINING = False
    NOISE_STDDEV = 0.01
    DO_AUGMENT = False
    
    DO_SMOOTH = False
    SIGMA = 1.0
    DO_ZERO_OUT = False
    
    # setup scaling parameters dictionary
    mean_std_dct = {}
    with open(ancillary_path+'mean_std_lo_hi_l2.pkl', 'rb') as f:
        mean_std_dct_l2 = pickle.load(f)

    with open(ancillary_path+'mean_std_lo_hi_l1b.pkl', 'rb') as f:
        mean_std_dct_l1b = pickle.load(f)

    mean_std_dct.update(mean_std_dct_l1b)
    mean_std_dct.update(mean_std_dct_l2)
    
    IMG_DEPTH = 1
    
    label_param = 'cloud_probability'
    
    params = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
    params_i = ['temp_11_0um_nom', 'refl_0_65um_nom', label_param]
    data_params_half = ['temp_11_0um_nom']
    data_params_full = ['refl_0_65um_nom']
    
    label_idx_i = params_i.index(label_param)
    label_idx = params.index(label_param)
    
    print('data_params_half: ', data_params_half)
    print('data_params_full: ', data_params_full)
    print('label_param: ', label_param)
    
    KERNEL_SIZE = 3  # target size: (128, 128)
    X_LEN = Y_LEN = 128
    
    if KERNEL_SIZE == 3:
        slc_x = slice(1, int(X_LEN/2) + 3)
        slc_y = slice(1, int(Y_LEN/2) + 3)
        x_128 = slice(4, X_LEN + 4)
        y_128 = slice(4, Y_LEN + 4)
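        # With KERNEL_SIZE=3, the half-res inputs keep a one-pixel border
        # (66x66), so the initial VALID conv yields 64x64 outputs, matching
        # the 128x128 full-res label crops after 2x2 aggregation.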
    # ----------------------------------------
    
    
    def build_residual_conv2d_block(conv, num_filters, block_name, activation=tf.nn.relu, padding='SAME',
                                    kernel_initializer='he_uniform', scale=None, kernel_size=3,
                                    do_drop_out=True, drop_rate=0.5, do_batch_norm=True):
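        """EDSR-style residual block: two stacked convolutions on a skip branch,
        optionally scaled and regularized with dropout/batch norm, then added
        back onto the input."""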
    
        with tf.name_scope(block_name):
            skip = tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding=padding, kernel_initializer=kernel_initializer, activation=activation)(conv)
            skip = tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding=padding, activation=None)(skip)
    
            if scale is not None:
                skip = tf.keras.layers.Lambda(lambda x: x * scale)(skip)
    
            if do_drop_out:
                skip = tf.keras.layers.Dropout(drop_rate)(skip)
    
            if do_batch_norm:
                skip = tf.keras.layers.BatchNormalization()(skip)
    
            conv = conv + skip
            print(block_name+':', conv.shape)
    
        return conv
    
    
    def upsample_mean(grd):
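        """Nearest-neighbor 2x upsample: copy each (batch, y, x) cell into a 2x2 output block."""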
        bsize, ylen, xlen = grd.shape
        up = np.zeros((bsize, ylen*2, xlen*2))

        up[:, ::2, ::2] = grd
        up[:, 1::2, ::2] = grd
        up[:, ::2, 1::2] = grd
        up[:, 1::2, 1::2] = grd
    
        return up
    
    
    def get_grid_cell_mean(grd_k):
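        """NaN-aware mean over each 2x2 block, halving the grid resolution."""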
        # grd_k = np.where(np.isnan(grd_k), 0, grd_k)
        a = grd_k[:, 0::2, 0::2]
        b = grd_k[:, 1::2, 0::2]
        c = grd_k[:, 0::2, 1::2]
        d = grd_k[:, 1::2, 1::2]
        mean = np.nanmean([a, b, c, d], axis=0)
    
        return mean
    
    
    def get_min_max_std(grd_k):
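        """NaN-aware min, max, std, and mean over each 2x2 block (half-resolution outputs)."""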
        # grd_k = np.where(np.isnan(grd_k), 0, grd_k)
        a = grd_k[:, 0::2, 0::2]
        b = grd_k[:, 1::2, 0::2]
        c = grd_k[:, 0::2, 1::2]
        d = grd_k[:, 1::2, 1::2]
    
        lo = np.nanmin([a, b, c, d], axis=0)
        hi = np.nanmax([a, b, c, d], axis=0)
        std = np.nanstd([a, b, c, d], axis=0)
        avg = np.nanmean([a, b, c, d], axis=0)
    
        return lo, hi, std, avg
    
    
    def get_label_data(grd_k):
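        """3-category cloud fraction per 2x2 block of thresholded cloud probability:
        0 = clear (0/4 cloudy), 1 = partly cloudy (1-3/4), 2 = overcast (4/4)."""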
        grd_k = np.where(np.isnan(grd_k), 0, grd_k)
        grd_k = np.where(grd_k < 0.50, 0, 1)
    
        a = grd_k[:, 0::2, 0::2]
        b = grd_k[:, 1::2, 0::2]
        c = grd_k[:, 0::2, 1::2]
        d = grd_k[:, 1::2, 1::2]
        s = a + b + c + d
    
        cat_0 = (s == 0)
        cat_1 = np.logical_and(s > 0, s < 4)
        cat_2 = (s == 4)
        s[cat_0] = 0
        s[cat_1] = 1
        s[cat_2] = 2
    
        return s
    
    
    def get_label_data_5cat(grd_k):
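        """5-category cloud fraction per 2x2 block: the count of cloudy
        (probability >= 0.5) pixels, 0 through 4."""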
        grd_k = np.where(np.isnan(grd_k), 0, grd_k)
        grd_k = np.where(grd_k < 0.5, 0, 1)
    
        a = grd_k[:, 0::2, 0::2]
        b = grd_k[:, 1::2, 0::2]
        c = grd_k[:, 0::2, 1::2]
        d = grd_k[:, 1::2, 1::2]
        s = a + b + c + d
    
        cat_0 = (s == 0)
        cat_1 = (s == 1)
        cat_2 = (s == 2)
        cat_3 = (s == 3)
        cat_4 = (s == 4)
    
        s[cat_0] = 0
        s[cat_1] = 1
        s[cat_2] = 2
        s[cat_3] = 3
        s[cat_4] = 4
    
        return s
    
    
    class SRCNN:
        
        def __init__(self):
    
            self.train_data = None
            self.train_label = None
            self.test_data = None
            self.test_label = None
            self.test_data_denorm = None
            
            self.train_dataset = None
            self.inner_train_dataset = None
            self.test_dataset = None
            self.eval_dataset = None
            self.X_img = None
            self.X_prof = None
            self.X_u = None
            self.X_v = None
            self.X_sfc = None
            self.inputs = []
            self.y = None
            self.handle = None
            self.inner_handle = None
            self.in_mem_batch = None
    
            self.h5f_l1b_trn = None
            self.h5f_l1b_tst = None
            self.h5f_l2_trn = None
            self.h5f_l2_tst = None
    
            self.logits = None
    
            self.predict_data = None
            self.predict_dataset = None
            self.mean_list = None
            self.std_list = None
            
            self.training_op = None
            self.correct = None
            self.accuracy = None
            self.loss = None
            self.pred_class = None
            self.variable_averages = None
    
            self.global_step = None
    
            self.writer_train = None
            self.writer_valid = None
            self.writer_train_valid_loss = None
    
            self.OUT_OF_RANGE = False
    
            self.model = None
            self.optimizer = None
            self.ema = None
            self.train_loss = None
            self.train_accuracy = None
            self.test_loss = None
            self.test_accuracy = None
            self.test_auc = None
            self.test_recall = None
            self.test_precision = None
            self.test_confusion_matrix = None
            self.test_true_pos = None
            self.test_true_neg = None
            self.test_false_pos = None
            self.test_false_neg = None
    
            self.test_labels = []
            self.test_preds = []
            self.test_probs = None
            self.test_input = []
    
            self.learningRateSchedule = None
            self.num_data_samples = None
            self.initial_learning_rate = None
    
            self.data_dct = None
            self.train_data_files = None
            self.train_label_files = None
            self.test_data_files = None
            self.test_label_files = None
    
            # 5 channels: 11um BT, reflectance lo/hi/avg, and 2x2-mean cloud probability
            self.n_chans = 5
    
            self.X_img = tf.keras.Input(shape=(None, None, self.n_chans))
    
            self.inputs.append(self.X_img)
    
            tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
    
        def get_in_mem_data_batch(self, idxs, is_training):
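            """Load the .npy tiles for the given file indexes and build the 5-channel
            input stack (normalized 11um BT; normalized lo/hi/avg 0.65um reflectance
            per 2x2 block; 2x2-mean cloud probability) plus cloud-fraction labels."""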
            if is_training:
                data_files = self.train_data_files
                label_files = self.train_label_files
            else:
                data_files = self.test_data_files
                label_files = self.test_label_files
    
            data_s = []
            label_s = []
            for k in idxs:
                f = data_files[k]
                nda = np.load(f)
                data_s.append(nda)
    
                f = label_files[k]
                nda = np.load(f)
                label_s.append(nda)
            input_data = np.concatenate(data_s)
            input_label = np.concatenate(label_s)
    
            data_norm = []
            for param in data_params_half:
                # Alternative (commented below): derive this channel from the
                # full-res labels, then average each 2x2 block
                idx = params.index(param)
                tmp = input_data[:, idx, :, :]
                # idx = params_i.index(param)
                # tmp = input_label[:, idx, :, :]
                # tmp = get_grid_cell_mean(tmp)
                tmp = tmp[:, slc_y, slc_x]
                tmp = normalize(tmp, param, mean_std_dct)
                data_norm.append(tmp)
    
            for param in data_params_full:
                idx = params_i.index(param)
                tmp = input_label[:, idx, :, :]
    
                lo, hi, std, avg = get_min_max_std(tmp)
                lo = normalize(lo, param, mean_std_dct)
                hi = normalize(hi, param, mean_std_dct)
                avg = normalize(avg, param, mean_std_dct)
    
                data_norm.append(lo[:, slc_y, slc_x])
                data_norm.append(hi[:, slc_y, slc_x])
                data_norm.append(avg[:, slc_y, slc_x])
            # ---------------------------------------------------
            # If the next line is uncommented (half-res input), remove the
            # get_grid_cell_mean call below
            # tmp = input_data[:, label_idx, :, :]
            tmp = input_label[:, label_idx_i, :, :]
            tmp = get_grid_cell_mean(tmp)
            tmp = tmp[:, slc_y, slc_x]
            data_norm.append(tmp)
            # ---------
            data = np.stack(data_norm, axis=3)
            data = data.astype(np.float32)
    
            # -----------------------------------------------------
            # -----------------------------------------------------
            label = input_label[:, label_idx_i, :, :]
            label = label[:, y_128, x_128]
            if NumClasses == 5:
                label = get_label_data_5cat(label)
            else:
                label = get_label_data(label)
    
            label = np.where(np.isnan(label), 0, label)
            label = np.expand_dims(label, axis=3)
    
            label = label.astype(np.float32)
    
            if is_training and DO_AUGMENT:
                data_ud = np.flip(data, axis=1)
                label_ud = np.flip(label, axis=1)
    
                data_lr = np.flip(data, axis=2)
                label_lr = np.flip(label, axis=2)
    
                data = np.concatenate([data, data_ud, data_lr])
                label = np.concatenate([label, label_ud, label_lr])
    
            return data, label
    
        def get_in_mem_data_batch_train(self, idxs):
            return self.get_in_mem_data_batch(idxs, True)
    
        def get_in_mem_data_batch_test(self, idxs):
            return self.get_in_mem_data_batch(idxs, False)
    
        @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
        def data_function(self, indexes):
            out = tf.numpy_function(self.get_in_mem_data_batch_train, [indexes], [tf.float32, tf.float32])
            return out
    
        @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
        def data_function_test(self, indexes):
            out = tf.numpy_function(self.get_in_mem_data_batch_test, [indexes], [tf.float32, tf.float32])
            return out
    
        def get_train_dataset(self, indexes):
            indexes = list(indexes)
    
            dataset = tf.data.Dataset.from_tensor_slices(indexes)
            dataset = dataset.batch(PROC_BATCH_SIZE)
            dataset = dataset.map(self.data_function, num_parallel_calls=AUTOTUNE)
            dataset = dataset.cache()
            if DO_AUGMENT:
                dataset = dataset.shuffle(PROC_BATCH_BUFFER_SIZE)
            dataset = dataset.prefetch(buffer_size=AUTOTUNE)
            self.train_dataset = dataset
    
        def get_test_dataset(self, indexes):
            indexes = list(indexes)
    
            dataset = tf.data.Dataset.from_tensor_slices(indexes)
            dataset = dataset.batch(PROC_BATCH_SIZE)
            dataset = dataset.map(self.data_function_test, num_parallel_calls=AUTOTUNE)
            dataset = dataset.cache()
            self.test_dataset = dataset
    
        def setup_pipeline(self, train_data_files, train_label_files, test_data_files, test_label_files, num_train_samples):
            self.train_data_files = train_data_files
            self.train_label_files = train_label_files
            self.test_data_files = test_data_files
            self.test_label_files = test_label_files
    
            trn_idxs = np.arange(len(train_data_files))
            np.random.shuffle(trn_idxs)
    
            tst_idxs = np.arange(len(test_data_files))
    
            self.get_train_dataset(trn_idxs)
            self.get_test_dataset(tst_idxs)
    
            self.num_data_samples = num_train_samples  # approximately
    
            print('datetime: ', now)
            print('training and test data: ')
            print('---------------------------')
            print('num train samples: ', self.num_data_samples)
            print('BATCH SIZE: ', BATCH_SIZE)
            print('num test samples: ', tst_idxs.shape[0])
            print('setup_pipeline: Done')
    
        def setup_test_pipeline(self, test_data_files, test_label_files):
            self.test_data_files = test_data_files
            self.test_label_files = test_label_files
            tst_idxs = np.arange(len(test_data_files))
            self.get_test_dataset(tst_idxs)
            print('setup_test_pipeline: Done')
    
        def build_srcnn(self, do_drop_out=False, do_batch_norm=False, drop_rate=0.5, factor=2):
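            """Fully convolutional network: one VALID 3x3 conv, six residual blocks,
            a 3x3 SAME conv, and a 1x1 conv head with softmax (or sigmoid) activation."""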
            print('build_srcnn')
            padding = "SAME"
    
            # activation = tf.nn.elu
            activation = tf.nn.relu
            momentum = 0.99  # unused
    
            num_filters = 64
    
            input_2d = self.inputs[0]
            print('input: ', input_2d.shape)
    
            conv = conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=KERNEL_SIZE, kernel_initializer='he_uniform', activation=activation, padding='VALID')(input_2d)
            print(conv.shape)
    
            # if NOISE_TRAINING:
            #     conv = conv_b = tf.keras.layers.GaussianNoise(stddev=NOISE_STDDEV)(conv)
    
            scale = 0.2
    
            for i in range(1, 7):
                conv_b = build_residual_conv2d_block(conv_b, num_filters, f'Residual_Block_{i}',
                                                     kernel_size=KERNEL_SIZE, scale=scale)
    
            conv_b = tf.keras.layers.Conv2D(num_filters, kernel_size=3, strides=1, activation=activation, kernel_initializer='he_uniform', padding=padding)(conv_b)
    
            # conv = conv + conv_b
            conv = conv_b
            print(conv.shape)
    
            if NumClasses == 2:
                final_activation = tf.nn.sigmoid  # For binary
            else:
                final_activation = tf.nn.softmax  # For multi-class
    
            # This is effectively a Dense layer
            self.logits = tf.keras.layers.Conv2D(NumLogits, kernel_size=1, strides=1, padding=padding, activation=final_activation)(conv)
            print(self.logits.shape)
    
        def build_training(self):
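            """Configure the loss (binary or sparse categorical cross-entropy) and an
            Adam optimizer with an exponentially decaying learning rate."""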
            if NumClasses == 2:
                self.loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)  # for two-class only
            else:
                self.loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # For multi-class
            # self.loss = tf.keras.losses.MeanAbsoluteError()  # Regression
    
            # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
            initial_learning_rate = 0.002
            decay_rate = 0.95
            steps_per_epoch = int(self.num_data_samples/BATCH_SIZE)  # one epoch
            decay_steps = int(steps_per_epoch) * 4
            print('initial rate, decay rate, steps/epoch, decay steps: ', initial_learning_rate, decay_rate, steps_per_epoch, decay_steps)
    
            self.learningRateSchedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps, decay_rate)
    
            optimizer = tf.keras.optimizers.Adam(learning_rate=self.learningRateSchedule)
    
            if TRACK_MOVING_AVERAGE:
                # Not sure that this works properly (from tfa)
                # optimizer = tfa.optimizers.MovingAverage(optimizer)
                self.ema = tf.train.ExponentialMovingAverage(decay=0.9999)
    
            self.optimizer = optimizer
            self.initial_learning_rate = initial_learning_rate
    
        def build_evaluation(self):
            self.train_loss = tf.keras.metrics.Mean(name='train_loss')
            self.test_loss = tf.keras.metrics.Mean(name='test_loss')
    
            if NumClasses == 2:
                self.train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')
                self.test_accuracy = tf.keras.metrics.BinaryAccuracy(name='test_accuracy')
                self.test_auc = tf.keras.metrics.AUC(name='test_auc')
                self.test_recall = tf.keras.metrics.Recall(name='test_recall')
                self.test_precision = tf.keras.metrics.Precision(name='test_precision')
                self.test_true_neg = tf.keras.metrics.TrueNegatives(name='test_true_neg')
                self.test_true_pos = tf.keras.metrics.TruePositives(name='test_true_pos')
                self.test_false_neg = tf.keras.metrics.FalseNegatives(name='test_false_neg')
                self.test_false_pos = tf.keras.metrics.FalsePositives(name='test_false_pos')
            else:
                self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
                self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
    
        @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
        def train_step(self, inputs, labels):
            labels = tf.squeeze(labels, axis=[3])
            with tf.GradientTape() as tape:
                pred = self.model([inputs], training=True)
                loss = self.loss(labels, pred)
                total_loss = loss
                if len(self.model.losses) > 0:
                    reg_loss = tf.math.add_n(self.model.losses)
                    total_loss = loss + reg_loss
            gradients = tape.gradient(total_loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
            if TRACK_MOVING_AVERAGE:
                self.ema.apply(self.model.trainable_variables)
    
            self.train_loss(loss)
            self.train_accuracy(labels, pred)
    
            return loss
    
        @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
        def test_step(self, inputs, labels):
            labels = tf.squeeze(labels, axis=[3])
            pred = self.model([inputs], training=False)
            t_loss = self.loss(labels, pred)
    
            self.test_loss(t_loss)
            self.test_accuracy(labels, pred)
    
        # @tf.function(input_signature=[tf.TensorSpec(None, tf.float32), tf.TensorSpec(None, tf.float32)])
        # tf.function decorator disabled: pred.numpy() below requires eager execution
        def predict(self, inputs, labels):
            pred = self.model([inputs], training=False)
            # t_loss = self.loss(tf.squeeze(labels, axis=[3]), pred)
            t_loss = self.loss(labels, pred)
    
            self.test_labels.append(labels)
            self.test_preds.append(pred.numpy())
            self.test_input.append(inputs)
    
            self.test_loss(t_loss)
            self.test_accuracy(labels, pred)
    
        def reset_test_metrics(self):
            self.test_loss.reset_states()
            self.test_accuracy.reset_states()
    
        def get_metrics(self):
            recall = self.test_recall.result()
            precsn = self.test_precision.result()
            f1 = 2 * (precsn * recall) / (precsn + recall)
    
            tn = self.test_true_neg.result()
            tp = self.test_true_pos.result()
            fn = self.test_false_neg.result()
            fp = self.test_false_pos.result()
    
            mcc = ((tp * tn) - (fp * fn)) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
            return f1, mcc
    
        def do_training(self, ckpt_dir=None):
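            """Custom training loop: each preprocessed macro-batch is re-batched into
            BATCH_SIZE mini-batches; validation runs every 100 steps and at epoch end;
            the checkpoint is saved on best validation loss, with optional early stopping."""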
    
            if ckpt_dir is None:
                if not os.path.exists(modeldir):
                    os.mkdir(modeldir)
                ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
                ckpt_manager = tf.train.CheckpointManager(ckpt, modeldir, max_to_keep=3)
            else:
                ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
                ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
                ckpt.restore(ckpt_manager.latest_checkpoint)
    
            self.writer_train = tf.summary.create_file_writer(os.path.join(logdir, 'plot_train'))
            self.writer_valid = tf.summary.create_file_writer(os.path.join(logdir, 'plot_valid'))
            self.writer_train_valid_loss = tf.summary.create_file_writer(os.path.join(logdir, 'plot_train_valid_loss'))
    
            step = 0
            total_time = 0
            best_test_loss = np.finfo(np.float64).max  # np.float was removed in NumPy 1.24
    
            if EARLY_STOP:
                es = EarlyStop()
    
            for epoch in range(NUM_EPOCHS):
                self.train_loss.reset_states()
                self.train_accuracy.reset_states()
    
                t0 = datetime.datetime.now().timestamp()
    
                proc_batch_cnt = 0
                n_samples = 0
    
                for data, label in self.train_dataset:
                    trn_ds = tf.data.Dataset.from_tensor_slices((data, label))
                    trn_ds = trn_ds.batch(BATCH_SIZE)
                    for mini_batch in trn_ds:
                        if self.learningRateSchedule is not None:
                            loss = self.train_step(mini_batch[0], mini_batch[1])
    
                        if (step % 100) == 0:
    
                            with self.writer_train.as_default():
                                tf.summary.scalar('loss_trn', loss.numpy(), step=step)
                                tf.summary.scalar('learning_rate', self.optimizer._decayed_lr('float32').numpy(), step=step)
                                tf.summary.scalar('num_train_steps', step, step=step)
                                tf.summary.scalar('num_epochs', epoch, step=step)
    
                            self.reset_test_metrics()
                            for data_tst, label_tst in self.test_dataset:
                                tst_ds = tf.data.Dataset.from_tensor_slices((data_tst, label_tst))
                                tst_ds = tst_ds.batch(BATCH_SIZE)
                                for mini_batch_test in tst_ds:
                                    self.test_step(mini_batch_test[0], mini_batch_test[1])
    
                            with self.writer_valid.as_default():
                                tf.summary.scalar('loss_val', self.test_loss.result(), step=step)
                                tf.summary.scalar('acc_val', self.test_accuracy.result(), step=step)
    
                            with self.writer_train_valid_loss.as_default():
                                tf.summary.scalar('loss_trn', loss.numpy(), step=step)
                                tf.summary.scalar('loss_val', self.test_loss.result(), step=step)
    
                            print('****** test loss, acc, lr: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy(),
                                  self.optimizer._decayed_lr('float32').numpy())
    
                        step += 1
                        print('train loss: ', loss.numpy())
    
                    proc_batch_cnt += 1
                    n_samples += data.shape[0]
                    print('proc_batch_cnt: ', proc_batch_cnt, n_samples)
    
                t1 = datetime.datetime.now().timestamp()
                print('End of Epoch: ', epoch+1, 'elapsed time: ', (t1-t0))
                total_time += (t1-t0)
    
                self.reset_test_metrics()
                for data, label in self.test_dataset:
                    ds = tf.data.Dataset.from_tensor_slices((data, label))
                    ds = ds.batch(BATCH_SIZE)
                    for mini_batch in ds:
                        self.test_step(mini_batch[0], mini_batch[1])
    
                print('loss, acc: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy())
                print('------------------------------------------------------')
    
                tst_loss = self.test_loss.result().numpy()
                if tst_loss < best_test_loss:
                    best_test_loss = tst_loss
                    ckpt_manager.save()
    
                if EARLY_STOP and es.check_stop(tst_loss):
                    break
    
            print('total time: ', total_time)
            self.writer_train.close()
            self.writer_valid.close()
            self.writer_train_valid_loss.close()
    
        def build_model(self):
            self.build_srcnn()
            self.model = tf.keras.Model(self.inputs, self.logits)
    
        def restore(self, ckpt_dir):
    
            ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
            ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
            ckpt.restore(ckpt_manager.latest_checkpoint)
    
            self.reset_test_metrics()
    
            for data, label in self.test_dataset:
                ds = tf.data.Dataset.from_tensor_slices((data, label))
                ds = ds.batch(BATCH_SIZE)
                for mini_batch_test in ds:
                    self.predict(mini_batch_test[0], mini_batch_test[1])
    
            print('loss, acc: ', self.test_loss.result().numpy(), self.test_accuracy.result().numpy())
    
            labels = np.concatenate(self.test_labels)
            preds = np.concatenate(self.test_preds)
            inputs = np.concatenate(self.test_input)
            print(labels.shape, preds.shape)
    
            return labels, preds, inputs
    
        def do_evaluate(self, inputs, ckpt_dir):
    
            ckpt = tf.train.Checkpoint(step=tf.Variable(1), model=self.model)
            ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=3)
            ckpt.restore(ckpt_manager.latest_checkpoint)
    
            self.reset_test_metrics()
    
            pred = self.model([inputs], training=False)
            self.test_probs = pred
            pred = pred.numpy()
    
            return pred
    
        def run(self, directory, ckpt_dir=None, num_data_samples=50000):
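            """Train on train* tiles and validate on valid* tiles under `directory`;
            'mres' files hold the half-resolution inputs, 'ires' files the
            full-resolution labels."""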
            train_data_files = glob.glob(directory+'train*mres*.npy')
            valid_data_files = glob.glob(directory+'valid*mres*.npy')
            train_label_files = glob.glob(directory+'train*ires*.npy')
            valid_label_files = glob.glob(directory+'valid*ires*.npy')
            self.setup_pipeline(train_data_files, train_label_files, valid_data_files, valid_label_files, num_data_samples)
    
            self.build_model()
            self.build_training()
            self.build_evaluation()
            self.do_training(ckpt_dir=ckpt_dir)
    
        def run_restore(self, directory, ckpt_dir):
            self.num_data_samples = 1000
    
            valid_data_files = glob.glob(directory + 'valid*mres*.npy')
            valid_label_files = glob.glob(directory + 'valid*ires*.npy')
            self.setup_test_pipeline(valid_data_files, valid_label_files)
    
            self.build_model()
            self.build_training()
            self.build_evaluation()
            return self.restore(ckpt_dir)
    
        def run_evaluate(self, data, ckpt_dir):
            # data = tf.convert_to_tensor(data, dtype=tf.float32)
            self.num_data_samples = 80000
            self.build_model()
            self.build_training()
            self.build_evaluation()
            return self.do_evaluate(data, ckpt_dir)
    
    
    def run_restore_static(directory, ckpt_dir, out_file=None):
        nn = SRCNN()
        labels, preds, inputs = nn.run_restore(directory, ckpt_dir)
        if out_file is not None:
            np.save(out_file,
                    [np.squeeze(labels), preds.argmax(axis=3),
                     denormalize(inputs[:, 1:65, 1:65, 0], 'temp_11_0um_nom', mean_std_dct),
                     denormalize(inputs[:, 1:65, 1:65, 1], 'refl_0_65um_nom', mean_std_dct),
                     denormalize(inputs[:, 1:65, 1:65, 2], 'refl_0_65um_nom', mean_std_dct),
                     denormalize(inputs[:, 1:65, 1:65, 3], 'refl_0_65um_nom', mean_std_dct),
                     inputs[:, 1:65, 1:65, 4]])
    
    
    def run_evaluate_static(in_file, out_file, ckpt_dir):
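        """Run the restored model on one file: build the 5-channel input from the
        'orig' (native resolution) and 'super' (double resolution) fields, predict
        cloud-fraction categories, and save or return them with BT, reflectance,
        cloud probability, and geolocation."""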
        gc.collect()
    
        h5f = h5py.File(in_file, 'r')
    
        bt = get_grid_values_all(h5f, 'orig/temp_11_0um')
        y_len, x_len = bt.shape[0], bt.shape[1]
        lons = get_grid_values_all(h5f, 'orig/longitude')
        lats = get_grid_values_all(h5f, 'orig/latitude')
        bt = np.where(np.isnan(bt), 0, bt)
        bt = normalize(bt, 'temp_11_0um_nom', mean_std_dct)
    
        refl = get_grid_values_all(h5f, 'super/refl_0_65um')
        refl = np.where(np.isnan(refl), 0, refl)
        refl = np.expand_dims(refl, axis=0)
        refl_lo, refl_hi, refl_std, refl_avg = get_min_max_std(refl)
        refl_lo = normalize(refl_lo, 'refl_0_65um_nom', mean_std_dct)
        refl_hi = normalize(refl_hi, 'refl_0_65um_nom', mean_std_dct)
        refl_avg = normalize(refl_avg, 'refl_0_65um_nom', mean_std_dct)
        refl_lo = np.squeeze(refl_lo)
        refl_hi = np.squeeze(refl_hi)
        refl_avg = np.squeeze(refl_avg)
    
        cp = get_grid_values_all(h5f, 'orig/'+label_param)
        cp = np.where(np.isnan(cp), 0, cp)
    
        data = np.stack([bt, refl_lo, refl_hi, refl_avg, cp], axis=2)
        data = np.expand_dims(data, axis=0)
    
        h5f.close()
    
        nn = SRCNN()
        probs = nn.run_evaluate(data, ckpt_dir)
        cld_frac = probs.argmax(axis=3)
        cld_frac = cld_frac.astype(np.int8)
        cld_frac_out = np.zeros((y_len, x_len), dtype=np.int8)
        border = int((KERNEL_SIZE - 1)/2)
        cld_frac_out[border:y_len - border, border:x_len - border] = cld_frac[0, :, :]
    
        bt = denormalize(bt, 'temp_11_0um_nom', mean_std_dct)
        refl_avg = denormalize(refl_avg, 'refl_0_65um_nom', mean_std_dct)
    
        var_names = ['cloud_fraction', 'temp_11_0um', 'refl_0_65um']
        dims = ['num_params', 'y', 'x']
    
        da = xr.DataArray(np.stack([cld_frac_out, bt, refl_avg], axis=0), dims=dims)
        da = da.assign_coords({
            'num_params': var_names,
            'lat': (['y', 'x'], lats),
            'lon': (['y', 'x'], lons)
        })
    
        if out_file is not None:
            np.save(out_file, (cld_frac_out, bt, refl_avg, cp, lons, lats))
        else:
            return [cld_frac_out, bt, refl_avg, cp, lons, lats]
    
    
    def analyze_3cat(file):
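        """Evaluate saved 3-category labels/predictions as three pairwise binary
        problems (clear vs mixed, mixed vs cloudy, clear vs cloudy), printing and
        returning the confusion matrices, accuracy, recall, precision, and MCC."""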
    
        tup = np.load(file, allow_pickle=True)
        lbls = tup[0]
        pred = tup[1]
    
        lbls = lbls.flatten()
        pred = pred.flatten()
        print(np.sum(lbls == 0), np.sum(lbls == 1), np.sum(lbls == 2))
    
        msk_0_1 = lbls != 2
        msk_1_2 = lbls != 0
        msk_0_2 = lbls != 1
    
        lbls_0_1 = lbls[msk_0_1]
    
        pred_0_1 = pred[msk_0_1]
        pred_0_1 = np.where(pred_0_1 == 2, 1, pred_0_1)
    
        # ----
        lbls_1_2 = lbls[msk_1_2]
        lbls_1_2 = np.where(lbls_1_2 == 1, 0, lbls_1_2)
        lbls_1_2 = np.where(lbls_1_2 == 2, 1, lbls_1_2)
    
        pred_1_2 = pred[msk_1_2]
        pred_1_2 = np.where(pred_1_2 == 0, -9, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == 1, 0, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == 2, 1, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == -9, 1, pred_1_2)
    
        # ----
        lbls_0_2 = lbls[msk_0_2]
        lbls_0_2 = np.where(lbls_0_2 == 2, 1, lbls_0_2)
    
        pred_0_2 = pred[msk_0_2]
        pred_0_2 = np.where(pred_0_2 == 2, 1, pred_0_2)
    
        cm_0_1 = confusion_matrix_values(lbls_0_1, pred_0_1)
        cm_1_2 = confusion_matrix_values(lbls_1_2, pred_1_2)
        cm_0_2 = confusion_matrix_values(lbls_0_2, pred_0_2)
    
        true_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 0)
        false_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 0)
    
        true_no_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 1)
        false_no_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 1)
    
        true_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 0)
        false_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 0)
    
        true_no_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 1)
        false_no_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 1)
    
        true_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 0)
        false_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 0)
    
        true_no_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 1)
        false_no_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 1)
    
        tp_0 = np.sum(true_0_1).astype(np.float64)
        tp_1 = np.sum(true_1_2).astype(np.float64)
        tp_2 = np.sum(true_0_2).astype(np.float64)
    
        tn_0 = np.sum(true_no_0_1).astype(np.float64)
        tn_1 = np.sum(true_no_1_2).astype(np.float64)
        tn_2 = np.sum(true_no_0_2).astype(np.float64)
    
        fp_0 = np.sum(false_0_1).astype(np.float64)
        fp_1 = np.sum(false_1_2).astype(np.float64)
        fp_2 = np.sum(false_0_2).astype(np.float64)
    
        fn_0 = np.sum(false_no_0_1).astype(np.float64)
        fn_1 = np.sum(false_no_1_2).astype(np.float64)
        fn_2 = np.sum(false_no_0_2).astype(np.float64)
    
        recall_0 = tp_0 / (tp_0 + fn_0)
        recall_1 = tp_1 / (tp_1 + fn_1)
        recall_2 = tp_2 / (tp_2 + fn_2)
    
        precision_0 = tp_0 / (tp_0 + fp_0)
        precision_1 = tp_1 / (tp_1 + fp_1)
        precision_2 = tp_2 / (tp_2 + fp_2)
    
        mcc_0 = ((tp_0 * tn_0) - (fp_0 * fn_0)) / np.sqrt((tp_0 + fp_0) * (tp_0 + fn_0) * (tn_0 + fp_0) * (tn_0 + fn_0))
        mcc_1 = ((tp_1 * tn_1) - (fp_1 * fn_1)) / np.sqrt((tp_1 + fp_1) * (tp_1 + fn_1) * (tn_1 + fp_1) * (tn_1 + fn_1))
        mcc_2 = ((tp_2 * tn_2) - (fp_2 * fn_2)) / np.sqrt((tp_2 + fp_2) * (tp_2 + fn_2) * (tn_2 + fp_2) * (tn_2 + fn_2))
    
        acc_0 = np.sum(lbls_0_1 == pred_0_1)/pred_0_1.size
        acc_1 = np.sum(lbls_1_2 == pred_1_2)/pred_1_2.size
        acc_2 = np.sum(lbls_0_2 == pred_0_2)/pred_0_2.size
    
        print(acc_0, recall_0, precision_0, mcc_0)
        print(acc_1, recall_1, precision_1, mcc_1)
        print(acc_2, recall_2, precision_2, mcc_2)
    
        return cm_0_1, cm_1_2, cm_0_2, [acc_0, acc_1, acc_2], [recall_0, recall_1, recall_2],\
            [precision_0, precision_1, precision_2], [mcc_0, mcc_1, mcc_2]
    
    
    def analyze_5cat(file):
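        """Collapse the 5 cloud-fraction categories to 3 (clear / partly cloudy /
        overcast), then run the same pairwise binary evaluation as analyze_3cat."""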
    
        tup = np.load(file, allow_pickle=True)
        lbls = tup[0]
        pred = tup[1]
    
        lbls = lbls.flatten()
        pred = pred.flatten()
    
        new_lbls = np.zeros(lbls.size, dtype=np.int32)
        new_pred = np.zeros(pred.size, dtype=np.int32)
    
        new_lbls[lbls == 0] = 0
        new_lbls[lbls == 1] = 1
        new_lbls[lbls == 2] = 1
        new_lbls[lbls == 3] = 1
        new_lbls[lbls == 4] = 2
    
        new_pred[pred == 0] = 0
        new_pred[pred == 1] = 1
        new_pred[pred == 2] = 1
        new_pred[pred == 3] = 1
        new_pred[pred == 4] = 2
    
        lbls = new_lbls
        pred = new_pred
    
        print(np.sum(lbls == 0), np.sum(lbls == 1), np.sum(lbls == 2))
    
        msk_0_1 = lbls != 2
        msk_1_2 = lbls != 0
        msk_0_2 = lbls != 1
    
        lbls_0_1 = lbls[msk_0_1]
    
        pred_0_1 = pred[msk_0_1]
        pred_0_1 = np.where(pred_0_1 == 2, 1, pred_0_1)
    
        # ----------------------------------------------
        lbls_1_2 = lbls[msk_1_2]
        lbls_1_2 = np.where(lbls_1_2 == 1, 0, lbls_1_2)
        lbls_1_2 = np.where(lbls_1_2 == 2, 1, lbls_1_2)
    
        pred_1_2 = pred[msk_1_2]
        pred_1_2 = np.where(pred_1_2 == 0, -9, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == 1, 0, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == 2, 1, pred_1_2)
        pred_1_2 = np.where(pred_1_2 == -9, 1, pred_1_2)
    
        # -----------------------------------------------
        lbls_0_2 = lbls[msk_0_2]
        lbls_0_2 = np.where(lbls_0_2 == 2, 1, lbls_0_2)
    
        pred_0_2 = pred[msk_0_2]
        pred_0_2 = np.where(pred_0_2 == 2, 1, pred_0_2)
    
        cm_0_1 = confusion_matrix_values(lbls_0_1, pred_0_1)
        cm_1_2 = confusion_matrix_values(lbls_1_2, pred_1_2)
        cm_0_2 = confusion_matrix_values(lbls_0_2, pred_0_2)
    
        true_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 0)
        false_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 0)
    
        true_no_0_1 = (lbls_0_1 == 1) & (pred_0_1 == 1)
        false_no_0_1 = (lbls_0_1 == 0) & (pred_0_1 == 1)
    
        true_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 0)
        false_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 0)
    
        true_no_0_2 = (lbls_0_2 == 1) & (pred_0_2 == 1)
        false_no_0_2 = (lbls_0_2 == 0) & (pred_0_2 == 1)
    
        true_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 0)
        false_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 0)
    
        true_no_1_2 = (lbls_1_2 == 1) & (pred_1_2 == 1)
        false_no_1_2 = (lbls_1_2 == 0) & (pred_1_2 == 1)
    
        tp_0 = np.sum(true_0_1).astype(np.float64)
        tp_1 = np.sum(true_1_2).astype(np.float64)
        tp_2 = np.sum(true_0_2).astype(np.float64)
    
        tn_0 = np.sum(true_no_0_1).astype(np.float64)
        tn_1 = np.sum(true_no_1_2).astype(np.float64)
        tn_2 = np.sum(true_no_0_2).astype(np.float64)
    
        fp_0 = np.sum(false_0_1).astype(np.float64)
        fp_1 = np.sum(false_1_2).astype(np.float64)
        fp_2 = np.sum(false_0_2).astype(np.float64)
    
        fn_0 = np.sum(false_no_0_1).astype(np.float64)
        fn_1 = np.sum(false_no_1_2).astype(np.float64)
        fn_2 = np.sum(false_no_0_2).astype(np.float64)
    
        recall_0 = tp_0 / (tp_0 + fn_0)
        recall_1 = tp_1 / (tp_1 + fn_1)
        recall_2 = tp_2 / (tp_2 + fn_2)
    
        precision_0 = tp_0 / (tp_0 + fp_0)
        precision_1 = tp_1 / (tp_1 + fp_1)
        precision_2 = tp_2 / (tp_2 + fp_2)
    
        mcc_0 = ((tp_0 * tn_0) - (fp_0 * fn_0)) / np.sqrt((tp_0 + fp_0) * (tp_0 + fn_0) * (tn_0 + fp_0) * (tn_0 + fn_0))
        mcc_1 = ((tp_1 * tn_1) - (fp_1 * fn_1)) / np.sqrt((tp_1 + fp_1) * (tp_1 + fn_1) * (tn_1 + fp_1) * (tn_1 + fn_1))
        mcc_2 = ((tp_2 * tn_2) - (fp_2 * fn_2)) / np.sqrt((tp_2 + fp_2) * (tp_2 + fn_2) * (tn_2 + fp_2) * (tn_2 + fn_2))
    
        acc_0 = np.sum(lbls_0_1 == pred_0_1)/pred_0_1.size
        acc_1 = np.sum(lbls_1_2 == pred_1_2)/pred_1_2.size
        acc_2 = np.sum(lbls_0_2 == pred_0_2)/pred_0_2.size
    
        print(acc_0, recall_0, precision_0, mcc_0)
        print(acc_1, recall_1, precision_1, mcc_1)
        print(acc_2, recall_2, precision_2, mcc_2)
    
        return cm_0_1, cm_1_2, cm_0_2, [acc_0, acc_1, acc_2], [recall_0, recall_1, recall_2],\
            [precision_0, precision_1, precision_2], [mcc_0, mcc_1, mcc_2], lbls, pred
    
    # from util.plot_cm import *
    # from sklearn.metrics import confusion_matrix
    # import numpy as np
    # tup = np.load('/Users/tomrink/cld_frac_viirs.npy', allow_pickle=True)
    # lbls = tup[0]
    # pred = tup[1]
    # bt = tup[2]
    # refl_lo = tup[3]
    # refl_hi = tup[4]
    # refl_avg = tup[5]
    # cld_prob = tup[6]
    # from util.plot import plot_image
    # cm = confusion_matrix(lbls.flatten(), pred.flatten())
    # plot_confusion_matrix(cm, ['CLR', '1/4', '1/2', '3/4', 'CLD'], normalize=True, axis=0)
    
    # lbls = lbls.flatten()
    # pred = pred.flatten()
    # cld_prob = cld_prob.flatten()
    # cat_0 = lbls == 0
    # cat_1 = lbls == 1
    # cat_2 = lbls == 2
    # cat_3 = lbls == 3
    # cat_4 = lbls == 4
    # plt.hist(cld_prob[cat_0], log=True, histtype='step')
    # plt.hist(cld_prob[cat_1], log=True, histtype='step')
    # plt.hist(cld_prob[cat_2], log=True, histtype='step')
    # plt.hist(cld_prob[cat_3], log=True, histtype='step')
    # plt.hist(cld_prob[cat_4], log=True, histtype='step')
    
    # from deeplearning.cloud_fraction_fcn_viirs import run_evaluate_static
    # run_evaluate_static('/Users/tomrink/clavrx_VNP02IMG.A2019306.1912.001.2019307003236.uwssec.nc',
    # '/Users/tomrink/cld_frac_A2019306.1912', '/Users/tomrink/tf_model_cld_frac_viirs/run-20230421193944/')
    # import numpy as np
    # tup = np.load('/Users/tomrink/cld_frac_A2019306.1912.npy', allow_pickle=True)
    # cfrac = tup[0]
    # bt = tup[1]
    # refl = tup[2]
    # cp = tup[3]
    # from util.plot import plot_image
    
    # from deeplearning.cloud_fraction_fcn_viirs import analyze_5cat
    # cm_0_1, cm_1_2, cm_0_2, acc, recall, prec, mcc, lbls, pred = analyze_5cat('/Users/tomrink/cld_frac_viirs.npy')
    # from util.bar_plot import do_plot
    # do_plot(['ACC', 'RECALL', 'PREC', 'MCC'], [[acc[0], recall[0], prec[0], mcc[0]],
    #                                            [acc[1], recall[1], prec[1], mcc[1]],
    #                                            [acc[2], recall[2], prec[2], mcc[2]]],
    #         ['CLR v MIX', 'MIX v CLD', 'CLR v CLD'], ['green', 'blue', 'black'],
    #         title='CLD FRAC', xlabel='Metric', barWidth=0.15, ylim=[0.4, 1.0])
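
    # Training usage sketch (directory path is hypothetical; run() globs the
    # train*/valid* 'mres'/'ires' .npy tiles inside it):
    # nn = SRCNN()
    # nn.run('/path/to/viirs_tiles/', num_data_samples=50000)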
    
    
    if __name__ == "__main__":
        nn = SRCNN()
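        # run() expects a directory of train*/valid* 'mres'/'ires' .npy tiles;
        # 'matchup_filename' is a placeholder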
        nn.run('matchup_filename')