diff --git a/aosstower/level_00/influxdb.py b/aosstower/level_00/influxdb.py
index c03fb6af10c82dabb7fcc8cd6ef766d97e014f85..2fb1689af0f3675313308f19266f8e3fb16b8892 100644
--- a/aosstower/level_00/influxdb.py
+++ b/aosstower/level_00/influxdb.py
@@ -70,55 +70,24 @@ class Updater(object):
     def rolling_average(self, record):
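+        # Anchor the averaging window at the first timestamp seen and measure elapsed seconds from that anchor.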
         self.start_time = self.start_time if self.start_time else record['timestamp']
         time_interval = (record['timestamp'] - self.start_time).total_seconds()
-        # Add in 5 minutes for cutoff
-        start = record['timestamp'] - timedelta(seconds=time_interval % 300)
-        end = self.start_time + timedelta(minutes=5)
-        if time_interval > 300:
-            for key in record:
-                if key == 'timestamp':
-                    self.data[key] = np.append(self.data[key], end)
-                else:
-                    self.data[key] = np.append(self.data[key], np.nan)
-        else:
-            for key in record:
-                if self.data.get(key) is None:
-                    self.data[key] = np.array([])
-                self.data[key] = np.append(self.data[key], record[key])
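+        # Accumulate every field of the incoming record into its per-key numpy array, creating arrays on first use.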
+        for key in record:
+            if self.data.get(key) is None:
+                self.data[key] = np.array([])
+            self.data[key] = np.append(self.data[key], record[key])
-        # If 5 minutes of data are ready, average current data in dict. Holds up to 15 minutes.
-        # 60 * 5 seconds = 5 minutes.
+        # Average the accumulated data once 5 minutes of samples are ready (60 samples * 5 seconds = 5 minutes)
+        # or when the clock reaches an exact 5-minute mark. The dict is trimmed to the most recent 12 minutes below.
-        if time_interval >= 300:
+        on_five_minute_mark = record['timestamp'].minute % 5 == 0 and record['timestamp'].second == 0
+        if time_interval >= 300 or on_five_minute_mark:
             # Appending to a DataFrame is slow. Instead, this adds to a dict in chunks and passes it to the DataFrame.
             frame = self._calculate_averages()
             frame.fillna(value=np.nan, inplace=True)
-            print(frame['rel_hum'])
-            print(frame.resample('5T', closed='right').mean()['rel_hum'])
-            print(frame.asfreq('5T')['rel_hum'])
-            # Keep data set within minimum window to improve speed.
-            # Wind gusts looks at 10 minute intervals, including the first data point which needs 2 minutes of data
-            # before it, totalling 12 minutes.
+            # Keep the data set within the minimum window to improve speed.
+            # Wind gust calculations look at 10-minute intervals, and the first data point needs 2 minutes of data
+            # before it, totalling 12 minutes.
-            test = frame.asfreq('5T')
-            if len(test.index) > 3:
-                new_frame = frame.copy()
-                new_frame['timestamp'] = list(new_frame.index)
-                data = new_frame.drop(new_frame.index[:new_frame.index.get_loc(test.index[-3])]).to_dict('list')
-                for key in self.data:
-                    self.data[key] = data[key]
-            if time_interval > 300:
-                for key in record:
-                    if key == 'timestamp':
-                        if end != start:
-                            self.data[key] = np.array([start])
-                    else:
-                        if end != start:
-                            self.data[key] = np.array([np.nan])
-                self.start_time = self.data['timestamp'][-1]
-                for key in record:
-                    if self.data.get(key) is None:
-                        self.data[key] = np.array([])
-                    self.data[key] = np.append(self.data[key], record[key])
-            else:
-                self.start_time = self.data['timestamp'][-1]
-            return test
+            time_mask = frame.index > frame.index[-1] - timedelta(minutes=12)
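+            # frame is built from self.data (see above), so this boolean mask lines up row-for-row with each array.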
+            self.data = {key: val[time_mask] for key, val in self.data.items()}
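+            # Reset the window anchor to the newest retained timestamp so the next 5-minute count starts from here.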
+            self.start_time = self.data['timestamp'][-1]
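+            # Only hand the averaged frame back when the clock is exactly on a 5-minute mark.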
+            if on_five_minute_mark:
+                return frame
 
     def _calculate_averages(self):
         KNOTS_9 = calc.knots_to_mps(9.)