Skip to content
Snippets Groups Projects
Commit 82a04e31 authored by tomrink's avatar tomrink
Browse files

minor

parent b828a3d7
Branches
No related tags found
No related merge requests found
...@@ -217,17 +217,17 @@ class IcingIntensityNN: ...@@ -217,17 +217,17 @@ class IcingIntensityNN:
tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT) tf.debugging.set_log_device_placement(LOG_DEVICE_PLACEMENT)
gpus = tf.config.experimental.list_physical_devices('GPU') # gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus: # if gpus:
try: # try:
# Currently, memory growth needs to be the same across GPUs # # Currently, memory growth needs to be the same across GPUs
for gpu in gpus: # for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True) # tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU') # logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") # print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e: # except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized # # Memory growth must be set before GPUs have been initialized
print(e) # print(e)
def get_in_mem_data_batch(self, idxs, is_training): def get_in_mem_data_batch(self, idxs, is_training):
......
0% Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please sign in to comment