diff --git a/modules/GSOC/.deepsource.toml b/modules/GSOC/.deepsource.toml
new file mode 100644
index 0000000000000000000000000000000000000000..e6c1240dfe995df2a66f553485aa93d54e0f386d
--- /dev/null
+++ b/modules/GSOC/.deepsource.toml
@@ -0,0 +1,6 @@
+version = 1
+[[analyzers]]
+name = "python"
+enabled = true
+  [analyzers.meta]
+  runtime_version = "3.x.x"
diff --git a/modules/GSOC/E1_ShuffleNet/README.md b/modules/GSOC/E1_ShuffleNet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..af41fca00801bcd2a74402bb77beed62fb7fdd3c
--- /dev/null
+++ b/modules/GSOC/E1_ShuffleNet/README.md
@@ -0,0 +1,29 @@
+ShuffleNet ONNX to Tensorflow Hub Module Export
+-------------------------------------------------
+**Original Repository for ONNX Model**: https://github.com/onnx/models/tree/master/shufflenet
+
+**Description**: ShuffleNet is a deep convolutional neural network for image classification.
+
+**Original Paper**: [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://arxiv.org/abs/1707.01083)
+
+## Module Properties
+1. Load as **Channel First**
+2. Input Shape: [1, 3, 224, 224]
+3. Output Shape: [1, 1000]
+4. Download Size: 5.3 MB
+
+## Steps
+- `pip install -r requirements.txt`
+- `python3 export.py`
+
+The TensorFlow Hub module is exported to *onnx/shufflenet/1*.
+
+## Load and Use Model
+
+```python
+import tensorflow_hub as hub
+
+module = hub.Module("onnx/shufflenet/1")
+result = module(...)
+```
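+
+For a fuller end-to-end sketch (hypothetical TF1-style usage; the zero-valued input is illustrative, the shapes follow the module properties above, and the caveat below still applies):
+
+```python
+import numpy as np
+import tensorflow as tf
+import tensorflow_hub as hub
+
+module = hub.Module("onnx/shufflenet/1")
+# Channel-first input, matching the declared [1, 3, 224, 224] signature.
+images = tf.placeholder(tf.float32, shape=[1, 3, 224, 224])
+logits = module(images)
+
+with tf.Session() as sess:
+  sess.run(tf.global_variables_initializer())
+  result = sess.run(logits, feed_dict={images: np.zeros([1, 3, 224, 224], np.float32)})
+  print(result.shape)  # expected: (1, 1000)
+```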
+## Important
+The module produced is **not** functional. See issue https://github.com/captain-pool/GSOC/issues/3.
diff --git a/modules/GSOC/E1_ShuffleNet/export.py b/modules/GSOC/E1_ShuffleNet/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..3363f02f1a7d859512cd2bd3d09a5b636e021b96
--- /dev/null
+++ b/modules/GSOC/E1_ShuffleNet/export.py
@@ -0,0 +1,80 @@
+# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
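+"""Exports the ShuffleNet ONNX model as a TensorFlow Hub module."""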
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import onnx
+from onnx_tf.backend import prepare
+import tensorflow as tf
+import tensorflow_hub as hub
+import os
+import io
+import requests
+from tqdm import tqdm
+import tarfile
+
+DOWNLOAD_LINK = "https://s3.amazonaws.com/download.onnx/models/opset_8/shufflenet.tar.gz"
+SHUFFLENET_PB = "shufflenet.pb"
+
+
+def load_shufflenet():
+  # Download Shufflenet if it doesn't exist
+  if not os.path.exists(DOWNLOAD_LINK.split("/")[-1]):
+    response = requests.get(DOWNLOAD_LINK, stream=True)
+    with open(DOWNLOAD_LINK.split("/")[-1], "wb") as handle:
+      for data in tqdm(
+              response.iter_content(
+                  chunk_size=io.DEFAULT_BUFFER_SIZE),
+              total=int(
+                  response.headers['Content-length']) //
+              io.DEFAULT_BUFFER_SIZE,
+              desc="Downloading"):
+        handle.write(data)
+    tar = tarfile.open(DOWNLOAD_LINK.split("/")[-1])
+    tar.extractall()
+    tar.close()
+  # Export Protobuf File if not present
+  if not os.path.exists(SHUFFLENET_PB):
+    model = onnx.load("shufflenet/model.onnx")
+    tf_rep = prepare(model)
+    tf_rep.export_graph(SHUFFLENET_PB)
+
+
+def module_fn():
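+  """Builds the module graph: imports the frozen ShuffleNet GraphDef and
+  adds a hub signature mapping the NCHW input placeholder to the softmax
+  output.
+  """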
+  input_name = "gpu_0/data_0:0"
+  output_name = "Softmax:0"
+  graph_def = tf.GraphDef()
+  with tf.gfile.GFile(SHUFFLENET_PB, 'rb') as f:
+    graph_def.ParseFromString(f.read())
+  input_tensor = tf.placeholder(tf.float32, shape=[1, 3, 224, 224])
+  output_tensor, = tf.import_graph_def(
+      graph_def, input_map={
+          input_name: input_tensor}, return_elements=[output_name])
+  hub.add_signature(inputs=input_tensor, outputs=output_tensor)
+
+
+def main():
+  load_shufflenet()
+  spec = hub.create_module_spec(module_fn)
+  module = hub.Module(spec)
+  with tf.Session() as sess:
+    module.export("onnx/shufflenet/1", sess)
+  print("Exported")
+
+
+if __name__ == "__main__":
+  main()
diff --git a/modules/GSOC/E1_TFHub_Sample_Deploy/BUILD b/modules/GSOC/E1_TFHub_Sample_Deploy/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..9c06a633d5651ac7cf0eb0e5ecbf9b1c6807c172
--- /dev/null
+++ b/modules/GSOC/E1_TFHub_Sample_Deploy/BUILD
@@ -0,0 +1,39 @@
+py_binary(
+    name = "export",
+    srcs = ["export.py"],
+    python_version = "PY3",
+    srcs_version = "PY2AND3",
+    deps = [":export_lib"],
+)
+
+py_library(
+    name = "export_lib",
+    srcs = ["export.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":expect_numpy_installed",
+        ":expect_tensorflow_installed",
+        "//external:tensorflow_hub",
+    ],
+)
+
+py_test(
+    name = "export_test",
+    srcs = ["export_test.py"],
+    python_version = "PY3",
+    srcs_version = "PY2AND3",
+    deps = [
+        ":expect_numpy_installed",
+        ":expect_tensorflow_installed",
+        ":export_lib",
+        "//external:tensorflow_hub",
+    ],
+)
+
+py_library(
+    name = "expect_tensorflow_installed",
+)
+
+py_library(
+    name = "expect_numpy_installed",
+)
diff --git a/modules/GSOC/E1_TFHub_Sample_Deploy/README.md b/modules/GSOC/E1_TFHub_Sample_Deploy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c474bd65810957460f94f95cbaaece28a90520a6
--- /dev/null
+++ b/modules/GSOC/E1_TFHub_Sample_Deploy/README.md
@@ -0,0 +1,8 @@
+Sample MNIST model to export as a TensorFlow Hub module
+------------------------------------------------------
+### To Export
+```bash
+$ python3 export.py
+```
+### To Run the Tests
+- `$ bazel test E1_TFHub_Sample_Deploy:export_test`
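+
+### To Load the Exported Module
+A minimal sketch, assuming the default export path used by `export.py` (the zero-valued input is illustrative):
+```python
+import tensorflow as tf
+import tensorflow_hub as hub
+
+model = hub.load("/tmp/tfhub_modules/mnist/digits/1")
+# The exported signature accepts uint8 batches of shape [None, 28, 28, 1].
+predictions = model.call(tf.zeros([1, 28, 28, 1], dtype=tf.uint8))
+print(predictions.shape)  # (1, 10)
+```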
diff --git a/modules/GSOC/E1_TFHub_Sample_Deploy/export.py b/modules/GSOC/E1_TFHub_Sample_Deploy/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cf1a36183adbf5156a4365af29c5056721da4c8
--- /dev/null
+++ b/modules/GSOC/E1_TFHub_Sample_Deploy/export.py
@@ -0,0 +1,144 @@
+"""Exporter for TF-Hub Modules in SavedModel v2.0 format.
+
+The module has a single signature, accepting a batch of images with shape
+[None, 28, 28, 1] and returning a prediction vector.
+In this example, we load the MNIST dataset from TFDS and train a simple
+digit classifier.
+"""
+import argparse
+import sys
+
+import tensorflow as tf
+import tensorflow_datasets as tfds
+import tensorflow_hub as hub
+
+FLAGS = None
+
+
+class MNIST(tf.keras.models.Model):
+  """Model representing a MNIST classifier"""
+
+  def __init__(self, output_activation="softmax"):
+    """
+    Args:
+      output_activation (str): activation for last layer.
+    """
+    super(MNIST, self).__init__()
+    self.layer_1 = tf.keras.layers.Dense(64)
+    self.layer_2 = tf.keras.layers.Dense(10, activation=output_activation)
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(
+              shape=[None, 28, 28, 1],
+              dtype=tf.uint8)])
+  def call(self, inputs):
+    casted = tf.keras.layers.Lambda(
+        lambda x: tf.cast(x, tf.float32))(inputs)
+    flatten = tf.keras.layers.Flatten()(casted)
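+    # Scale pixel values by the max of the first example in the batch.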
+    normalize = tf.keras.layers.Lambda(
+        lambda x: x / tf.reduce_max(tf.gather(x, 0)))(flatten)
+    x = self.layer_1(normalize)
+    output = self.layer_2(x)
+    return output
+
+
+def train_step(model, loss_fn, optimizer_fn, metric, image, label):
+  """ Perform one training step for the model.
+    Args:
+      model       : Keras model to train.
+      loss_fn     : Loss function to use.
+      optimizer_fn: Optimizer function to use.
+      metric      : keras.metric to use.
+      image       : Tensor of training images of shape [batch_size, 28, 28, 1].
+      label       : Tensor of class labels of shape [batch_size, ]
+  """
+  with tf.GradientTape() as tape:
+    preds = model(image)
+    label_onehot = tf.one_hot(label, 10)
+    loss_ = loss_fn(label_onehot, preds)
+  grads = tape.gradient(loss_, model.trainable_variables)
+  optimizer_fn.apply_gradients(zip(grads, model.trainable_variables))
+  metric(loss_)
+
+
+def train_and_export(
+        data_dir=None,
+        buffer_size=1000,
+        batch_size=32,
+        learning_rate=1e-3,
+        epoch=10,
+        dataset=None,
+        export_path="/tmp/tfhub_modules/mnist/digits/1"):
+  """
+    Trains and exports the model as SavedModel 2.0.
+    Args:
+      data_dir (str)           : Directory where to store datasets from TFDS.
+                                 (With proper authentication, cloud bucket supported).
+      buffer_size (int)        : Size of buffer to use while shuffling.
+      batch_size (int)         : Size of each training batch.
+      learning_rate (float)    : Learning rate to use for the optimizer.
+      epoch (int)              : Number of Epochs to train for.
+      dataset (tf.data.Dataset): Dataset object if data_dir is not provided.
+      export_path (str)        : Path to export the trained model.
+  """
+  model = MNIST()
+  if not dataset:
+    train = tfds.load(
+        "mnist",
+        split="train",
+        data_dir=data_dir,
+        batch_size=batch_size).shuffle(
+        buffer_size,
+        reshuffle_each_iteration=True)
+  else:
+    train = dataset
+  optimizer_fn = tf.optimizers.Adam(learning_rate=learning_rate)
+  loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
+  metric = tf.keras.metrics.Mean()
+  model.compile(optimizer_fn, loss=loss_fn)
+  # Training Loop
+  for epoch_num in range(epoch):
+    for step, data in enumerate(train):
+      train_step(
+          model,
+          loss_fn,
+          optimizer_fn,
+          metric,
+          data['image'],
+          data['label'])
+      sys.stdout.write("\rEpoch: #{}\tStep: #{}\tLoss: {}".format(
+          epoch_num, step, metric.result().numpy()))
+  # Exporting Model as SavedModel 2.0
+  tf.saved_model.save(model, export_path)
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--export_path",
+      type=str,
+      default="/tmp/tfhub_modules/mnist/digits/1",
+      help="Path to export the module")
+  parser.add_argument("--data_dir", type=str, default=None,
+                      help="Path to dustom TFDS data directory")
+  parser.add_argument(
+      "--buffer_size",
+      type=int,
+      default=1000,
+      help="Buffer Size to use while shuffling the dataset")
+  parser.add_argument(
+      "--batch_size",
+      type=int,
+      default=32,
+      help="Size of each batch")
+  parser.add_argument(
+      "--learning_rate",
+      type=float,
+      default=1e-3,
+      help="learning rate")
+  parser.add_argument(
+      "--epoch",
+      type=int,
+      default=10,
+      help="Number of iterations")
+  FLAGS, unparsed = parser.parse_known_args()
+  train_and_export(**vars(FLAGS))
diff --git a/modules/GSOC/E1_TFHub_Sample_Deploy/export_test.py b/modules/GSOC/E1_TFHub_Sample_Deploy/export_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5e33dbd07c31f59c2d2b041fd246b405ca9b1a
--- /dev/null
+++ b/modules/GSOC/E1_TFHub_Sample_Deploy/export_test.py
@@ -0,0 +1,39 @@
+import tensorflow as tf
+import tensorflow_hub as hub
+import unittest
+import tempfile
+import os
+import re
+import export
+
+
+class TFHubMNISTTest(tf.test.TestCase):
+  def setUp(self):
+    self.mock_dataset = tf.data.Dataset.range(5).map(
+        lambda x: {
+            "image": tf.cast(
+                255 * tf.random.normal([1, 28, 28, 1]), tf.uint8),
+            "label": x})
+
+  def test_model_exporting(self):
+    export.train_and_export(
+        epoch=1,
+        dataset=self.mock_dataset,
+        export_path="%s/model/1" %
+        self.get_temp_dir())
+    self.assertTrue(os.listdir(self.get_temp_dir()))
+
+  def test_empty_input(self):
+    export.train_and_export(
+        epoch=1,
+        dataset=self.mock_dataset,
+        export_path="%s/model/1" %
+        self.get_temp_dir())
+    model = hub.load("%s/model/1" % self.get_temp_dir())
+    output_ = model.call(
+        tf.zeros([1, 28, 28, 1], dtype=tf.uint8).numpy())
+    self.assertEqual(output_.shape, [1, 10])
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/modules/GSOC/E1_TPU_Sample/README.md b/modules/GSOC/E1_TPU_Sample/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3663ea684db120ed98879198c3ea8d4b4740bef1
--- /dev/null
+++ b/modules/GSOC/E1_TPU_Sample/README.md
@@ -0,0 +1,74 @@
+# Image Retraining Sample
+
+**Topics:** Tensorflow 2.0, TF Hub, Cloud TPU
+
+## Specs
+### Cloud TPU
+
+**TPU Type:** v2-8
+**Tensorflow Version:** 1.14
+
+### Cloud VM
+
+**Machine Type:** n1-standard-2
+**OS**: Debian 9
+**Tensorflow Version**: ships with tf-nightly; Tensorflow 2.0 Beta installed manually
+
+Launching the TPU and the VM
+---------------------------
+- Open Google Cloud Shell
+- `ctpu up -tf-version 1.14`
+- If a cloud storage bucket is not set up automatically, create one
+with the same name as the TPU and the VM
+- Enable HTTP traffic for the VM instance
+- SSH into the VM
+  - `pip3 uninstall -y tf-nightly`
+  - `pip3 install -r requirements.txt`
+  - `export CTPU_NAME=<common name of the tpu, vm and bucket>`
+
+
+Running Tensorboard:
+----------------------
+### Prerequisites
+```bash
+$ sudo -i
+$ pip3 uninstall -y tf-nightly
+$ pip3 install tensorflow==2.0.0-beta0
+$ exit
+```
+
+### Launch
+```bash
+$ sudo tensorboard --logdir gs://$CTPU_NAME/model_dir --port 80 &>/dev/null &
+```
+To view Tensorboard, browse to the public IP of the VM instance.
+
+Running the Code:
+----------------------
+#### Train The Model
+
+```bash
+$ python3 image_retraining_tpu_strategy.py --tpu $CTPU_NAME \
+--modeldir gs://$CTPU_NAME/modeldir \
+--datadir gs://$CTPU_NAME/datadir \
+--logdir gs://$CTPU_NAME/logdir \
+--num_steps 2000 \
+--dataset horses_or_humans
+```
+Training saves a single checkpoint at the end of the run. This checkpoint can later
+be loaded to export a SavedModel.
+
+#### Export Model
+
+```bash
+$ python3 image_retraining_tpu_strategy.py --tpu $CTPU_NAME \
+--modeldir gs://$CTPU_NAME/modeldir \
+--datadir gs://$CTPU_NAME/datadir \
+--logdir gs://$CTPU_NAME/logdir \
+--dataset horses_or_humans \
+--export_only \
+--export_path modeldir/model
+```
+Exporting a SavedModel of the trained model
+----------------------------
+The trained model is saved to `gs://$CTPU_NAME/modeldir/model` by default when the path is not explicitly set with `--export_path`.
diff --git a/modules/GSOC/E1_TPU_Sample/image_retraining_tpu_strategy.py b/modules/GSOC/E1_TPU_Sample/image_retraining_tpu_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c820b1e67db4918759c13bd0a9b9cc881f60827a
--- /dev/null
+++ b/modules/GSOC/E1_TPU_Sample/image_retraining_tpu_strategy.py
@@ -0,0 +1,268 @@
+# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+""" TensorFlow Sample for running TPU Training """
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+import argparse
+from absl import logging
+import tensorflow as tf
+import tensorflow_hub as hub
+import tensorflow_datasets as tfds
+
+tf.compat.v2.enable_v2_behavior()
+os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
+
+PRETRAINED_KERAS_LAYER = "https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/4"
+BATCH_SIZE = 32  # In case of TPU, Must be a multiple of 8
+
+
+class SingleDeviceStrategy(object):
+  """ Dummy Class to mimic tf.distribute.Strategy for Single Devices """
+
+  def __enter__(self, *args, **kwargs):
+    pass
+
+  def __exit__(self, *args, **kwargs):
+    pass
+
+  def scope(self):
+    return self
+
+  def experimental_distribute_dataset(self, dataset):
+    return dataset
+
+  def experimental_run_v2(self, func, args, kwargs):
+    return func(*args, **kwargs)
+
+  def reduce(self, reduction_type, distributed_data, axis):  # pylint: disable=unused-argument
+    return distributed_data
+
+
+class Model(tf.keras.models.Model):
+  """ Keras Model class for Image Retraining """
+
+  def __init__(self, num_classes):
+    super(Model, self).__init__()
+    logging.info("Loading Pretrained Image Vectorizer")
+    self._pretrained_layer = hub.KerasLayer(
+        PRETRAINED_KERAS_LAYER,
+        output_shape=[2048],
+        trainable=False)
+    self._dense_1 = tf.keras.layers.Dense(num_classes, activation="sigmoid")
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(
+              shape=[None, None, None, 3],
+              dtype=tf.float32)])
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, inputs):
+    intermediate = self._pretrained_layer(inputs)
+    return self._dense_1(intermediate)
+
+
+def connect_to_tpu(tpu=None):
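+  """Initializes the TPU system and returns (strategy, device) when `tpu`
+  is given; otherwise falls back to SingleDeviceStrategy and an empty
+  device string.
+  """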
+  if tpu:
+    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu)
+    tf.config.experimental_connect_to_host(cluster_resolver.get_master())
+    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
+    strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
+    return strategy, "/task:1" if os.environ.get("COLAB_TPU_ADDR") else "/job:worker"
+  return SingleDeviceStrategy(), ""
+
+
+def load_dataset(name, datadir, batch_size=32, shuffle=None):
+  """
+    Loads and preprocesses a dataset from TensorFlow Datasets.
+    Args:
+      name: Name of the dataset to load.
+      datadir: Directory to store the dataset in.
+      batch_size: size of each minibatch. Must be a multiple of 8.
+      shuffle: size of shuffle buffer to use. Not shuffled if set to None.
+  """
+  dataset, info = tfds.load(
+      name,
+      try_gcs=True,
+      data_dir=datadir,
+      split="train",
+      as_supervised=True,
+      with_info=True)
+  num_classes = info.features["label"].num_classes
+
+  def _scale_fn(image, label):
+    image = tf.cast(image, tf.float32)
+    image = image / 127.5
+    image -= 1.
+    label = tf.one_hot(label, num_classes)
+    label = tf.cast(label, tf.float32)
+    return image, label
+
+  options = tf.data.Options()
+  if not hasattr(tf.data.Options, "auto_shard"):
+    options.experimental_distribute.auto_shard = False
+  else:
+    options.auto_shard = False
+
+  dataset = (
+      dataset.map(
+          _scale_fn,
+          num_parallel_calls=tf.data.experimental.AUTOTUNE)
+      .with_options(options)
+      .batch(batch_size, drop_remainder=True))
+  if shuffle:
+    dataset = dataset.shuffle(shuffle, reshuffle_each_iteration=True)
+  return dataset.repeat(), num_classes
+
+
+def train_and_export(**kwargs):
+  """
+    Trains the model and exports as SavedModel.
+    Args:
+      tpu: Name or GRPC address of the TPU to use.
+      logdir: Path to a bucket or directory to store TensorBoard logs.
+      modeldir: Path to a bucket or directory to store the model.
+      datadir: Path to store the downloaded datasets to.
+      dataset: Name of the dataset to load from TensorFlow Datasets.
+      num_steps: Number of steps to train the model for.
+      export_path: Explicit path to export the SavedModel to (default: `modeldir/model`).
+      export_only: If set, skips training and only exports from saved checkpoints.
+  """
+  if kwargs["tpu"]:
+    # For TPU Training the Files must be stored in
+    # Cloud Buckets for the TPU to access
+    if not kwargs["logdir"].startswith("gs://"):
+      raise ValueError("To train on TPU. `logdir` must be cloud bucket")
+    if not kwargs["modeldir"].startswith("gs://"):
+      raise ValueError("To train on TPU. `modeldir` must be cloud bucket")
+    if kwargs["datadir"]:
+      if not kwargs["datadir"].startswith("gs://"):
+        raise ValueError("To train on TPU. `datadir` must be a cloud bucket")
+
+  os.environ["TFHUB_CACHE_DIR"] = os.path.join(
+      kwargs["modeldir"], "tfhub_cache")
+
+  strategy, device = connect_to_tpu((not kwargs["export_only"]) and kwargs["tpu"])
+  with tf.device(device), strategy.scope():
+    summary_writer = tf.summary.create_file_writer(kwargs["logdir"])
+    dataset, num_classes = load_dataset(
+        kwargs["dataset"],
+        kwargs["datadir"],
+        shuffle=3 * 32,
+        batch_size=BATCH_SIZE)
+    dataset = iter(strategy.experimental_distribute_dataset(dataset))
+    model = Model(num_classes)
+    loss_metric = tf.keras.metrics.Mean()
+    optimizer = tf.keras.optimizers.Adam()
+    ckpt = tf.train.Checkpoint(model=model)
+    def distributed_step(images, labels):
+      with tf.GradientTape() as tape:
+        logging.info("Taking predictions")
+        predictions = model.unsigned_call(images)
+        logging.info("Calculating loss")
+        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels, predictions)
+        loss_metric(loss)
+        loss = loss * (1.0 / BATCH_SIZE)
+      logging.info("Calculating gradients")
+      gradient = tape.gradient(loss, model.trainable_variables)
+      logging.info("Applying gradients")
+      train_op = optimizer.apply_gradients(zip(gradient, model.trainable_variables))
+      with tf.control_dependencies([train_op]):
+        return tf.cast(optimizer.iterations, tf.float32)
+
+    @tf.function
+    def train_step(image, label):
+      distributed_metric = strategy.experimental_run_v2(
+          distributed_step, args=[image, label])
+      step = strategy.reduce(
+          tf.distribute.ReduceOp.MEAN, distributed_metric, axis=None)
+      return step
+    if not kwargs["export_only"]:
+      logging.info("Starting Training")
+    while not kwargs["export_only"]:
+      image, label = next(dataset)
+      step = tf.cast(train_step(image, label), tf.int64)
+      with summary_writer.as_default():
+        tf.summary.scalar("loss", loss_metric.result(), step=optimizer.iterations)
+      if not step % 100:  # Log every 100 steps.
+        logging.info("Step: #%d\tLoss: %f" % (step, loss_metric.result()))
+      if step >= kwargs["num_steps"]:
+        ckpt.save(file_prefix=os.path.join(kwargs["modeldir"], "checkpoint"))
+        break
+    logging.info("Exporting Saved Model")
+    export_path = (kwargs["export_path"]
+                   or os.path.join(kwargs["modeldir"], "model"))
+    ckpt.restore(
+        tf.train.latest_checkpoint(
+          os.path.join(kwargs["modeldir"], "checkpoint")))
+    logging.info("Consuming checkpoint and tracing function")
+    model(tf.random.normal([1, 200, 200, 3]))
+    tf.saved_model.save(model, export_path)
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--dataset",
+      default=None,
+      help="Name of the Dataset to use")
+  parser.add_argument(
+      "--datadir",
+      default=None,
+      help="Directory to store the downloaded Dataset")
+  parser.add_argument(
+      "--modeldir",
+      default=None,
+      help="Directory to store the SavedModel to")
+  parser.add_argument(
+      "--logdir",
+      default=None,
+      help="Directory to store the Tensorboard logs")
+  parser.add_argument(
+      "--tpu",
+      default=None,
+      help="name or GRPC address of the TPU")
+  parser.add_argument(
+      "--num_steps",
+      default=1000,
+      type=int,
+      help="Number of Steps to train the model for")
+  parser.add_argument(
+      "--export_path",
+      default=None,
+      help="Explicitly specify the export path of the model."
+      "Else `modeldir/model` wil be used.")
+  parser.add_argument(
+      "--export_only",
+      default=False,
+      action="store_true",
+      help="Only export the SavedModel from presaved checkpoints")
+  parser.add_argument(
+      "--verbose",
+      "-v",
+      default=0,
+      action="count",
+      help="increase verbosity. multiple tags to increase more")
+  flags, unknown = parser.parse_known_args()
+  log_levels = [logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG]
+  log_level = log_levels[min(flags.verbose, len(log_levels) - 1)]
+  if not flags.modeldir:
+    logging.fatal("`--modeldir` must be specified")
+    sys.exit(1)
+  logging.set_verbosity(log_level)
+  train_and_export(**vars(flags))
diff --git a/modules/GSOC/E2_ESRGAN/README.md b/modules/GSOC/E2_ESRGAN/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bc459acf267b6e32a6004f3bfb526adec58a32e
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/README.md
@@ -0,0 +1,50 @@
+# Enhanced Super Resolution GAN
+Tensorflow 2.0 implementation of the Enhanced Super Resolution Generative Adversarial Network (Xintao et al.)
+[https://arxiv.org/pdf/1809.00219.pdf](https://arxiv.org/pdf/1809.00219.pdf)
+
+Enhanced Super Resolution GAN implemented as a part of Google Summer of Code 2019. [https://summerofcode.withgoogle.com/projects/#4662790671826944](https://summerofcode.withgoogle.com/projects/#4662790671826944)
+The SavedModel is expected to be shipped as a Tensorflow Hub Module. [https://tfhub.dev/](https://tfhub.dev/)
+
+## Overview
+Enhanced Super Resolution GAN is an improved version of Super Resolution GAN (Ledig et al.) [https://arxiv.org/abs/1609.04802](https://arxiv.org/abs/1609.04802).
+The model uses a Residual-in-Residual Block as its basic convolutional block instead of a plain residual network or a simple convolution trunk, providing a better flow of gradients at the microscopic level.
+In addition, the generator lacks Batch Normalization layers, to prevent smoothing out of the artifacts in the image. This allows
+ESRGAN to produce images with a better approximation of the sharp edges of image artifacts.
+ESRGAN uses a Relativistic Discriminator [https://arxiv.org/pdf/1807.00734.pdf](https://arxiv.org/pdf/1807.00734.pdf) to better approximate the probability of an
+image being real or fake, thus producing better results.
+During adversarial training, the generator's loss is a linear combination of the perceptual difference between the real and fake images (using a pretrained VGG19 network), the pixelwise absolute difference between them,
+and the Relativistic Average Loss between the real and fake images.
+The generator is trained in a two-phase setup:
+- The first phase focuses on reducing the pixelwise L1 distance between the input and target high-resolution images, to avoid the local minima
+encountered when starting from complete randomness.
+- The second phase focuses on creating a sharper and better reconstruction of minute artifacts.
+
+The final trained model is then interpolated between the L1-loss model and the adversarially trained model, to produce a photo-realistic
+reconstruction.
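+
+Concretely, the phase-two generator objective is the weighted sum sketched below; `lambda_` and `eta` correspond to `lambda` and `eta` in `config/config.yaml`, and the line mirrors the combination assembled in `lib/train.py` (where the perceptual term is currently commented out for the optical-depth variant):
+
+```python
+gen_loss = percep_loss + lambda_ * loss_RaG + eta * l1_loss
+```
+
+The interpolation is done parameter-wise, `theta = (1 - alpha) * theta_PSNR + alpha * theta_GAN`, with `alpha` set by `interpolation_parameter` (0.8) in the config.
+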
+## Example Usage
+```python
+import tensorflow as tf
+import tensorflow_hub as hub
+
+model = hub.load("https://github.com/captain-pool/GSOC/releases/download/1.0.0/esrgan.tar.gz")
+# low_resolution_image: float32 tensor of shape [BATCH, HEIGHT, WIDTH, 3]
+super_resolution = model(low_resolution_image)
+# Output Shape: [BATCH, 4 x HEIGHT, 4 x WIDTH, 3]
+# Output DType: tf.float32.
+# NOTE:
+# The values need to be clipped to [0, 255]
+# using tf.clip_by_value(...) and cast to tf.uint8 using tf.cast(...)
+# before plotting or saving as an image.
+```
+## Results
+
+The model, trained on the DIV2K dataset to upscale 128 x 128 patches by a factor of 4, yielded the following images.
+
+![ESRGAN_DIV2K](https://user-images.githubusercontent.com/13994201/63384084-c7ce5680-c3bb-11e9-96cc-99d9b8cb6804.jpg)
+
+**The model gives 32.6 dB PSNR on 512 x 512 image patches.**
+## Evaluate
+```bash
+python3 evaluate_psnr.py --lr_files "/path/to/images/*.png" --hr_files "/path/to/images/*.png"
+```
+For options, `python3 evaluate_psnr.py -h`
+## SavedModel 2.0
+Loadable SavedModel can be found at https://github.com/captain-pool/GSOC/releases/tag/1.0.0
diff --git a/modules/GSOC/E2_ESRGAN/config/config.yaml b/modules/GSOC/E2_ESRGAN/config/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab4afa5fda4c29bf064dd9425db58c5ed40206c8
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/config/config.yaml
@@ -0,0 +1,46 @@
+checkpoint_path:
+  phase_1: "checkpoints/phase_1"
+  phase_2: "checkpoints/phase_2"
+
+dataset: 
+  name: "div2k"
+  scale_method: "bicubic"
+  hr_dimension: 512
+
+print_step: 1000
+
+# The following configuration is taken from ESRGAN Paper
+# Paper Available at: https://arxiv.org/abs/1809.00219
+interpolation_parameter: 0.8 # Interpolation parameter: 0 <= value <= 1
+RDB:
+  residual_scale_beta: 0.2
+# Training
+# warmup_num_iter: 1000 # Number of steps to train the PSNR model
+batch_size: 32
+train_psnr:
+  num_steps: 600000
+  adam:
+    initial_lr: 0.0002 # Theory 0.0002
+    decay:
+      factor: 0.5
+      step: 200000 # Theory: 2 x 10^5 Steps
+    beta_1: 0.9
+    beta_2: 0.999
+train_combined:
+  perceptual_loss_type: "L1" # Can be either L1 or L2
+  num_steps: 400000
+  lambda: !!float 5e-3
+  eta: !!float 1e-2
+  adam:
+    initial_lr: !!float 5e-5
+    beta_1: 0.9
+    beta_2: 0.999
+    decay:
+      factor: 0.5
+      step:
+        - 9000
+        - 30000
+        - 50000
+        - 100000
+        - 200000
+        - 300000
diff --git a/modules/GSOC/E2_ESRGAN/config/stats.yaml b/modules/GSOC/E2_ESRGAN/config/stats.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..81ef8d3e4e6f2ebbad21b3d1d0212e0a35c2811e
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/config/stats.yaml
@@ -0,0 +1,2 @@
+train_step_1: true
+train_step_2: true
diff --git a/modules/GSOC/E2_ESRGAN/evaluate_psnr.py b/modules/GSOC/E2_ESRGAN/evaluate_psnr.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfcbdd85ba7d5784d8b2854da87118981c238862
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/evaluate_psnr.py
@@ -0,0 +1,125 @@
+""" Evaluates the SavedModel of ESRGAN """
+import os
+import sys
+import itertools
+import functools
+import argparse
+from absl import logging
+import tensorflow.compat.v2 as tf
+import tensorflow_hub as hub
+tf.enable_v2_behavior()
+
+def build_dataset(
+    lr_glob,
+    hr_glob,
+    lr_crop_size=[128, 128],
+    scale=4):
+  """
+    Builds a tf.data.Dataset of aligned (LR, HR) patch pairs from glob patterns.
+    Args:
+      lr_glob: Pattern to match Low Resolution images.
+      hr_glob: Pattern to match High resolution images.
+      lr_crop_size: Size of Low Resolution images to work on.
+      scale: Scaling factor of the images to work on.
+  """
+
+  def _read_images_fn(low_res_images, high_res_images):
+    for lr_image_path, hr_image_path in zip(low_res_images, high_res_images):
+      lr_image = tf.image.decode_image(tf.io.read_file(lr_image_path))
+      hr_image = tf.image.decode_image(tf.io.read_file(hr_image_path))
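+      # Slide a stride-40 window over the LR image, yielding aligned LR/HR crops.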
+      for height in range(0, lr_image.shape[0] - lr_crop_size[0] + 1, 40):
+        for width in range(0, lr_image.shape[1] - lr_crop_size[1] + 1, 40):
+          lr_sub_image = tf.image.crop_to_bounding_box(
+              lr_image,
+              height, width,
+              lr_crop_size[0], lr_crop_size[1])
+          hr_sub_image = tf.image.crop_to_bounding_box(
+              hr_image,
+              height * scale, width * scale,
+              lr_crop_size[0] * scale, lr_crop_size[1] * scale)
+          yield (tf.cast(lr_sub_image, tf.float32),
+                 tf.cast(hr_sub_image, tf.float32))
+
+  hr_images = tf.io.gfile.glob(hr_glob)
+  lr_images = tf.io.gfile.glob(lr_glob)
+  hr_images = sorted(hr_images)
+  lr_images = sorted(lr_images)
+  dataset = tf.data.Dataset.from_generator(
+      functools.partial(_read_images_fn, lr_images, hr_images),
+      (tf.float32, tf.float32),
+      (tf.TensorShape([None, None, 3]), tf.TensorShape([None, None, 3])))
+  return dataset
+
+
+def main(**kwargs):
+  total = kwargs["total"]
+  dataset = build_dataset(kwargs["lr_files"], kwargs["hr_files"])
+  dataset = dataset.batch(kwargs["batch_size"])
+  count = itertools.count(start=1, step=kwargs["batch_size"])
+  os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
+  metrics = tf.keras.metrics.Mean()
+  for lr_image, hr_image in dataset:
+    # Loading the model multiple times is the only
+    # way to preserve the quality, since the quality
+    # degrades at every inference for no obvious reason
+    model = hub.load(kwargs["model"])
+    super_res_image = model.call(lr_image)
+    super_res_image = tf.clip_by_value(super_res_image, 0, 255)
+    metrics(
+        tf.reduce_mean(
+            tf.image.psnr(
+                super_res_image,
+                hr_image,
+                max_val=256)))
+    c = next(count)
+    if c >= total:
+      break
+    if not (c // 100) % 10:
+      print(
+          "%d Images Processed. Mean PSNR yet: %f" %
+          (c, metrics.result().numpy()))
+  print(
+      "%d Images processed. Mean PSNR: %f" %
+      (total, metrics.result().numpy()))
+  with tf.io.gfile.GFile("PSNR_result.txt", "w") as f:
+    f.write("%d Images processed. Mean PSNR: %f" %
+            (c, metrics.result().numpy()))
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--total",
+      default=10000,
+      type=int,
+      help="Total number of sub images to work on. (default: 10000)")
+  parser.add_argument(
+      "--batch_size",
+      default=16,
+      type=int,
+      help="Number of images per batch (default: 16)")
+  parser.add_argument(
+      "--lr_files",
+      default=None,
+      help="Pattern to match low resolution files")
+  parser.add_argument(
+      "--hr_files",
+      default=None,
+      help="Pattern to match High resolution images")
+  parser.add_argument(
+      "--model",
+      default="https://github.com/captain-pool/GSOC/"
+              "releases/download/1.0.0/esrgan.tar.gz",
+      help="URL or Path to the SavedModel")
+  parser.add_argument(
+      "--verbose", "-v",
+      default=0,
+      action="count",
+      help="Increase Verbosity of logging")
+
+  flags, unknown = parser.parse_known_args()
+  if not (flags.lr_files and flags.hr_files):
+    logging.error("Must set flag --lr_files and --hr_files")
+    sys.exit(1)
+  log_levels = [logging.FATAL, logging.WARN, logging.INFO, logging.DEBUG]
+  log_level = log_levels[min(flags.verbose, len(log_levels) - 1)]
+  logging.set_verbosity(log_level)
+  main(**vars(flags))
diff --git a/modules/GSOC/E2_ESRGAN/lib/dataset.py b/modules/GSOC/E2_ESRGAN/lib/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7485b7f1347df6dd118e93e3daea2f5e7afb219
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/lib/dataset.py
@@ -0,0 +1,185 @@
+""" Dataset Handlers for ESRGAN """
+import os
+import pickle
+from functools import partial
+
+import numpy as np
+from absl import logging
+import tensorflow as tf
+# import tensorflow_datasets as tfds
+from util.util import scale, make_tf_callable_generator
+from util.setup import ancillary_path
+
+
+def scale_down(method="bicubic", dimension=512, size=None, factor=4):
+  """ Scales down function based on the parameters provided.
+      Args:
+        method (default: bicubic): Interpolation method to be used for Scaling down the image.
+        dimension (default: 256): Dimension of the high resolution counterpart.
+        size (default: None): [height, width] of the image.
+        factor (default: 4): Factor by which the model enhances the low resolution image.
+      Returns:
+        tf.data.Dataset mappable python function based on the configuration.
+  """
+  if not size:
+    size = (dimension, dimension)
+  size_ = {"size": size}
+
+  def scale_fn(image, *args, **kwargs):
+    size = size_["size"]
+    high_resolution = image
+    # if not kwargs.get("no_random_crop", None):
+    #   high_resolution = tf.image.random_crop(
+    #       image, [size[0], size[1], image.shape[-1]])
+
+    low_resolution = tf.image.resize(
+        high_resolution,
+        [size[0] // factor, size[1] // factor],
+        method=method)
+    low_resolution = tf.clip_by_value(low_resolution, 0, 255)
+    high_resolution = tf.clip_by_value(high_resolution, 0, 255)
+    return low_resolution, high_resolution
+
+  scale_fn.size = size_["size"]
+  return scale_fn
+
+
+def augment_image(
+        brightness_delta=0.05,
+        contrast_factor=[0.7, 1.3],
+        saturation=[0.6, 1.6]):
+    """ Helper function used for augmentation of images in the dataset.
+      Args:
+        brightness_delta: maximum value for randomly assigning brightness of the image.
+        contrast_factor: list / tuple of minimum and maximum value of factor to set random contrast.
+                          None, if not to be used.
+        saturation: list / tuple of minimum and maximum value of factor to set random saturation.
+                    None, if not to be used.
+      Returns:
+        tf.data.Dataset mappable function for image augmentation
+    """
+
+    def augment_fn(data, label, *args, **kwargs):
+        # Augmenting data (~ 80%)
+
+        def augment_steps_fn(data, label):
+            # Randomly rotating image (~50%)
+            def rotate_fn(data, label):
+                times = tf.random.uniform(minval=1, maxval=4, dtype=tf.int32, shape=[])
+                return (tf.image.rot90(data, times),
+                        tf.image.rot90(label, times))
+
+            data, label = tf.cond(
+                tf.less_equal(tf.random.uniform([]), 0.5),
+                lambda: rotate_fn(data, label),
+                lambda: (data, label))
+
+            # Randomly flipping image (~50%)
+            def flip_fn(data, label):
+                return (tf.image.flip_left_right(data),
+                        tf.image.flip_left_right(label))
+
+            data, label = tf.cond(
+                tf.less_equal(tf.random.uniform([]), 0.5),
+                lambda: flip_fn(data, label),
+                lambda: (data, label))
+
+            return data, label
+
+        # Randomly returning unchanged data (~20%)
+        return tf.cond(
+            tf.less_equal(tf.random.uniform([]), 0.2),
+            lambda: (data, label),
+            partial(augment_steps_fn, data, label))
+
+    return augment_fn
+
+
+# def reform_dataset(dataset, types, size, num_elems=None):
+#   """ Helper function to convert the output_dtype of the dataset
+#       from (tf.float32, tf.uint8) to desired dtype
+#       Args:
+#         dataset: Source dataset(image-label dataset) to convert.
+#         types: tuple / list of target datatype.
+#         size: [height, width] threshold of the images.
+#         num_elems: Number of Data points to store
+#       Returns:
+#         tf.data.Dataset with the images of dimension >= Args.size and types = Args.types
+#   """
+#   _carrier = {"num_elems": num_elems}
+#
+#   def generator_fn():
+#     for idx, data in enumerate(dataset, 1):
+#       if _carrier["num_elems"]:
+#         if not idx % _carrier["num_elems"]:
+#           raise StopIteration
+#       if data[0].shape[0] >= size[0] and data[0].shape[1] >= size[1]:
+#         yield data[0], data[1]
+#       else:
+#         continue
+#   return tf.data.Dataset.from_generator(
+#       generator_fn, types, (tf.TensorShape([None, None, 3]), tf.TensorShape(None)))
+
+
+# setup scaling parameters dictionary
+mean_std_dct = {}
+mean_std_file = ancillary_path + 'mean_std_lo_hi_l2.pkl'
+with open(mean_std_file, 'rb') as f:
+  mean_std_dct_l2 = pickle.load(f)
+# scale() below reads from mean_std_dct, so fold the loaded stats into it.
+mean_std_dct.update(mean_std_dct_l2)
+
+
+class OpdNpyDataset:
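+  """Dataset of (low_resolution, high_resolution) batches built from
+  per-file NumPy arrays of the 'cld_opd_dcomp' (cloud optical depth) field.
+  """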
+
+  def __init__(self, filenames, hr_size, lr_size, batch_size=128):
+    self.filenames = filenames
+    self.num_files = len(filenames)
+    self.hr_size = hr_size
+    self.lr_size = lr_size
+
+    def integer_gen(limit):
+      n = 0
+      while n < limit:
+        yield n
+        n += 1
+
+    num_gen = integer_gen(self.num_files)
+    gen = make_tf_callable_generator(num_gen)
+    dataset = tf.data.Dataset.from_generator(gen, output_types=tf.int32)
+    dataset = dataset.batch(batch_size)
+    dataset = dataset.map(self.data_function, num_parallel_calls=8)
+    # These execute w/o an iteration on dataset?
+    # dataset = dataset.map(scale_down(), num_parallel_calls=1)
+    # dataset = dataset.map(augment_image(), num_parallel_calls=1)
+
+    dataset = dataset.cache()
+    dataset = dataset.prefetch(buffer_size=1)
+
+    self.dataset = dataset
+
+  def read_numpy_file_s(self, f_idxs):
+    data_s = []
+    for fi in f_idxs:
+      fname = self.filenames[fi]
+      data = np.load(fname)
+      data = data[0, ]
+      data = scale(data, 'cld_opd_dcomp', mean_std_dct)
+      data = data.astype(np.float32)
+      data_s.append(data)
+    hr_image = np.concatenate(data_s)
+    hr_image = tf.expand_dims(hr_image, axis=3)
+    hr_image = tf.image.crop_to_bounding_box(hr_image, 0, 0, self.hr_size, self.hr_size)
+    low_resolution = tf.image.resize(hr_image, [self.lr_size, self.lr_size], method='bicubic')
+    low_resolution = tf.math.multiply(low_resolution, 255.0)
+    hr_image = tf.math.multiply(hr_image, 255.0)
+    low_resolution = tf.clip_by_value(low_resolution, 0, 255)
+    high_resolution = tf.clip_by_value(hr_image, 0, 255)
+
+    low_resolution, high_resolution = augment_image()(low_resolution, high_resolution)
+
+    return low_resolution, high_resolution
+
+  @tf.function(input_signature=[tf.TensorSpec(None, tf.int32)])
+  def data_function(self, indexes):
+    out = tf.numpy_function(self.read_numpy_file_s, [indexes], [tf.float32, tf.float32])
+    return out
\ No newline at end of file
diff --git a/modules/GSOC/E2_ESRGAN/lib/model.py b/modules/GSOC/E2_ESRGAN/lib/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bddd99c636b3bfeff49016718a406f8dbf88b22
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/lib/model.py
@@ -0,0 +1,136 @@
+""" Keras Models for ESRGAN
+    Classes:
+      RRDBNet: Generator of ESRGAN (Residual-in-Residual Network).
+      VGGArch: VGG-style architecture forming the discriminator of ESRGAN.
+"""
+from collections import OrderedDict
+from functools import partial
+import tensorflow as tf
+from GSOC.E2_ESRGAN.lib import utils
+
+
+class RRDBNet(tf.keras.Model):
+  """ Residual in Residual Network consisting of:
+      - Convolution Layers
+      - Residual in Residual Block as the trunk of the model
+      - Pixel Shuffler layers (tf.nn.depth_to_space)
+      - Upscaling Convolutional Layers
+
+      Args:
+        out_channel: number of channels of the fake output image.
+        num_features (default: 32): number of filters to use in the convolutional layers.
+        trunk_size (default: 11): number of Residual in Residual Blocks to form the trunk.
+        growth_channel (default: 32): number of filters to use in the internal convolutional layers.
+        use_bias (default: True): boolean to indicate if bias is to be used in the conv layers.
+  """
+
+  def __init__(
+          self,
+          out_channel,
+          num_features=32,
+          trunk_size=11,
+          growth_channel=32,
+          use_bias=True,
+          first_call=True):
+    super(RRDBNet, self).__init__()
+    self.rrdb_block = partial(utils.RRDB, growth_channel, first_call=first_call)
+    conv = partial(
+        tf.keras.layers.Conv2D,
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        padding="same",
+        use_bias=use_bias)
+    conv_transpose = partial(
+        tf.keras.layers.Conv2DTranspose,
+        kernel_size=[3, 3],
+        strides=[2, 2],
+        padding="same",
+        use_bias=use_bias)
+    self.conv_first = conv(filters=num_features)
+    self.rdb_trunk = tf.keras.Sequential(
+        [self.rrdb_block() for _ in range(trunk_size)])
+    self.conv_trunk = conv(filters=num_features)
+    # Upsample
+    self.upsample1 = conv_transpose(num_features)
+    self.upsample2 = conv_transpose(num_features)
+    self.conv_last_1 = conv(num_features)
+    self.conv_last_2 = conv(out_channel)
+    self.lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+
+  # @tf.function(
+  #    input_signature=[
+  #        tf.TensorSpec(shape=[None, None, None, 3],
+  #                      dtype=tf.float32)])
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, input_):
+    feature = self.lrelu(self.conv_first(input_))
+    trunk = self.conv_trunk(self.rdb_trunk(feature))
+    feature = trunk + feature
+    feature = self.lrelu(self.upsample1(feature))
+    feature = self.lrelu(self.upsample2(feature))
+    feature = self.lrelu(self.conv_last_1(feature))
+    out = self.conv_last_2(feature)
+    return out
+
+
+class VGGArch(tf.keras.Model):
+  """ Keras Model for VGG28 Architecture needed to form
+      the discriminator of the architecture.
+      Args:
+        output_shape (default: 1): dimension of the discriminator output
+        num_features (default: 64): number of features to be used in the convolutional layers
+                                    a factor of 2**i will be multiplied as per the need
+        use_bias (default: True): Boolean to indicate whether to use biases for convolution layers
+  """
+
+  def __init__(
+          self,
+          batch_size=8,
+          output_shape=1,
+          num_features=64,
+          use_bias=False):
+
+    super(VGGArch, self).__init__()
+    conv = partial(
+        tf.keras.layers.Conv2D,
+        kernel_size=[3, 3], use_bias=use_bias, padding="same")
+    batch_norm = partial(tf.keras.layers.BatchNormalization)
+    def no_batch_norm(x): return x
+    self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+    self._dense_1 = tf.keras.layers.Dense(100)
+    self._dense_2 = tf.keras.layers.Dense(output_shape)
+    self._conv_layers = OrderedDict()
+    self._batch_norm = OrderedDict()
+    self._conv_layers["conv_0_0"] = conv(filters=num_features, strides=1)
+    self._conv_layers["conv_0_1"] = conv(filters=num_features, strides=2)
+    self._batch_norm["bn_0_1"] = batch_norm()
+    for i in range(1, 4):
+      for j in range(1, 3):
+        self._conv_layers["conv_%d_%d" % (i, j)] = conv(
+            filters=num_features * (2**i), strides=j)
+        self._batch_norm["bn_%d_%d" % (i, j)] = batch_norm()
+
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, input_):
+    features = self._lrelu(self._conv_layers["conv_0_0"](input_))
+    features = self._lrelu(
+        self._batch_norm["bn_0_1"](
+            self._conv_layers["conv_0_1"](features)))
+    # VGG Trunk
+    for i in range(1, 4):
+      for j in range(1, 3):
+        features = self._lrelu(
+            self._batch_norm["bn_%d_%d" % (i, j)](
+                self._conv_layers["conv_%d_%d" % (i, j)](features)))
+
+    flattened = tf.keras.layers.Flatten()(features)
+    dense = self._lrelu(self._dense_1(flattened))
+    out = self._dense_2(dense)
+    return out
diff --git a/modules/GSOC/E2_ESRGAN/lib/settings.py b/modules/GSOC/E2_ESRGAN/lib/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..42ccf98612dc3e6a94c53b89dbdbb7dabb1a851a
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/lib/settings.py
@@ -0,0 +1,55 @@
+import os
+import yaml
+
+
+def singleton(cls):
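+  """Class decorator that caches and returns a single instance per class."""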
+  instances = {}
+
+  def getinstance(*args, **kwargs):
+    if cls not in instances:
+      instances[cls] = cls(*args, **kwargs)
+    return instances[cls]
+  return getinstance
+
+
+@singleton
+class Settings(object):
+  def __init__(self, filename="config.yaml"):
+    self.__path = os.path.abspath(filename)
+
+  @property
+  def path(self):
+    return os.path.dirname(self.__path)
+
+  def __getitem__(self, index):
+    with open(self.__path, "r") as file_:
+      return yaml.load(file_.read(), Loader=yaml.FullLoader)[index]
+
+  def get(self, index, default=None):
+    with open(self.__path, "r") as file_:
+      return yaml.load(
+          file_.read(),
+          Loader=yaml.FullLoader).get(
+          index,
+          default)
+
+
+class Stats(object):
+  def __init__(self, filename="stats.yaml"):
+    if os.path.exists(filename):
+      with open(filename, "r") as file_:
+        self.__data = yaml.load(file_.read(), Loader=yaml.FullLoader)
+    else:
+      self.__data = {}
+    self.file = filename
+
+  def get(self, index, default=None):
+    return self.__data.get(index, default)
+
+  def __getitem__(self, index):
+    return self.__data[index]
+
+  def __setitem__(self, index, data):
+    self.__data[index] = data
+    with open(self.file, "w") as file_:
+      yaml.dump(self.__data, file_, default_flow_style=False)
diff --git a/modules/GSOC/E2_ESRGAN/lib/train.py b/modules/GSOC/E2_ESRGAN/lib/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..60710ceb72e5828182018e3cfc3097e6bca5b1ad
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/lib/train.py
@@ -0,0 +1,314 @@
+import os
+import time
+from functools import partial
+# from absl import logging
+import logging
+import tensorflow as tf
+# from lib import utils, dataset
+from GSOC.E2_ESRGAN.lib import utils, dataset
+import glob
+
+logging.basicConfig(filename='/Users/tomrink/esrgan_log.txt', level=logging.DEBUG)
+
+
+class Trainer(object):
+  """ Trainer class for ESRGAN """
+
+  def __init__(
+          self,
+          summary_writer,
+          summary_writer_2,
+          settings,
+          model_dir='/Users/tomrink/tf_model_esgan/',
+          data_dir=None,
+          manual=False,
+          strategy=None):
+    """ Setup the values and variables for Training.
+        Args:
+          summary_writer: tf.summary.SummaryWriter object to write summaries for Tensorboard.
+          settings: settings object for fetching data from config files.
+          data_dir (default: None): path where the data downloaded should be stored / accessed.
+          manual (default: False): boolean to represent if data_dir is a manual dir.
+    """
+    self.settings = settings
+    self.model_dir = model_dir
+    self.summary_writer = summary_writer
+    self.summary_writer_2 = summary_writer_2
+    self.strategy = strategy
+    dataset_args = self.settings["dataset"]
+    self.batch_size = self.settings["batch_size"]
+    hr_size = dataset_args['hr_dimension']
+    lr_size = int(hr_size // 4)
+
+    filenames = glob.glob('/Users/tomrink/data/opd_singles/valid_ires_??1?.npy')
+    self.dataset = dataset.OpdNpyDataset(filenames, hr_size, lr_size, batch_size=self.batch_size).dataset
+    print('OPD dataset initialized...')
+
+  def warmup_generator(self, generator):
+    """ Training on L1 Loss to warmup the Generator.
+
+    Minimizing the L1 Loss will increase the Peak Signal to Noise Ratio (PSNR)
+    of the generated image from the generator.
+    This trained generator is then used to bootstrap the training of the
+    GAN, providing better image inputs than random noise.
+    Args:
+      generator: Model Object for the Generator
+    """
+    # Loading up phase parameters
+    print('begin warmup_generator...')
+    warmup_num_iter = self.settings.get("warmup_num_iter", None)
+    phase_args = self.settings["train_psnr"]
+    decay_params = phase_args["adam"]["decay"]
+    decay_step = decay_params["step"]
+    decay_factor = decay_params["factor"]
+    total_steps = phase_args["num_steps"]
+    metric = tf.keras.metrics.Mean()
+    psnr_metric = tf.keras.metrics.Mean()
+
+    # Generator Optimizer
+    G_optimizer = tf.optimizers.Adam(
+        learning_rate=phase_args["adam"]["initial_lr"],
+        beta_1=phase_args["adam"]["beta_1"],
+        beta_2=phase_args["adam"]["beta_2"])
+
+    checkpoint = tf.train.Checkpoint(
+        G=generator,
+        G_optimizer=G_optimizer)
+
+    status = utils.load_checkpoint(checkpoint, "phase_1", self.model_dir)
+    logging.debug("phase_1 status object: {}".format(status))
+    previous_psnr = 0
+    start_time = time.time()
+    # Training starts
+
+    def _step_fn(image_lr, image_hr):
+      logging.debug("Starting Distributed Step")
+      with tf.GradientTape() as tape:
+        fake = generator.unsigned_call(image_lr)
+        loss = utils.pixel_loss(image_hr, fake) * (1.0 / self.batch_size)
+      psnr_metric(tf.reduce_mean(tf.image.psnr(fake, image_hr, max_val=256.0)))
+      # gen_vars = list(set(generator.trainable_variables))
+      gen_vars = generator.trainable_variables
+      gradient = tape.gradient(loss, gen_vars)
+      G_optimizer.apply_gradients(zip(gradient, gen_vars))
+      mean_loss = metric(loss)
+      logging.debug("Ending Distributed Step")
+      return tf.cast(G_optimizer.iterations, tf.float32)
+
+    @tf.function
+    def train_step(image_lr, image_hr):
+      # distributed_metric = self.strategy.experimental_run_v2(_step_fn, args=[image_lr, image_hr])
+      distributed_metric = _step_fn(image_lr, image_hr)
+      # mean_metric = self.strategy.reduce(tf.distribute.ReduceOp.MEAN, distributed_metric, axis=None)
+      # return mean_metric
+      return distributed_metric
+
+    for image_lr, image_hr in self.dataset:
+      print(image_lr.shape, image_hr.shape)
+      num_steps = train_step(image_lr, image_hr)
+
+      if num_steps >= total_steps:
+        return
+      if status:
+        status.assert_consumed()
+        logging.info(
+            "consumed checkpoint for phase_1 successfully")
+        status = None
+
+      if not num_steps % decay_step:  # Decay Learning Rate
+        logging.debug(
+            "Learning Rate: %s" %
+            G_optimizer.learning_rate.numpy())
+        G_optimizer.learning_rate.assign(
+            G_optimizer.learning_rate * decay_factor)
+        logging.debug(
+            "Decayed Learning Rate by %f."
+            "Current Learning Rate %s" % (
+                decay_factor, G_optimizer.learning_rate))
+      with self.summary_writer.as_default():
+        tf.summary.scalar(
+            "warmup_loss", metric.result(), step=G_optimizer.iterations)
+        tf.summary.scalar("mean_psnr", psnr_metric.result(), G_optimizer.iterations)
+
+      # if not num_steps % self.settings["print_step"]: # test
+      if True:
+        logging.info(
+            "[WARMUP] Step: {}\tGenerator Loss: {}"
+            "\tPSNR: {}\tTime Taken: {} sec".format(
+                num_steps,
+                metric.result(),
+                psnr_metric.result(),
+                time.time() -
+                start_time))
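+        # Save a checkpoint whenever the running mean PSNR improves over the last report.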
+        if psnr_metric.result() > previous_psnr:
+          utils.save_checkpoint(checkpoint, "phase_1", self.model_dir)
+        previous_psnr = psnr_metric.result()
+        start_time = time.time()
+
+  def train_gan(self, generator, discriminator):
+    """ Implements Training routine for ESRGAN
+        Args:
+          generator: Model object for the Generator
+          discriminator: Model object for the Discriminator
+    """
+    print('begin train_gan...')
+    phase_args = self.settings["train_combined"]
+    decay_args = phase_args["adam"]["decay"]
+    decay_factor = decay_args["factor"]
+    decay_steps = decay_args["step"]
+    lambda_ = phase_args["lambda"]
+    hr_dimension = self.settings["dataset"]["hr_dimension"]
+    eta = phase_args["eta"]
+    total_steps = phase_args["num_steps"]
+    optimizer = partial(
+        tf.optimizers.Adam,
+        learning_rate=phase_args["adam"]["initial_lr"],
+        beta_1=phase_args["adam"]["beta_1"],
+        beta_2=phase_args["adam"]["beta_2"])
+
+    G_optimizer = optimizer()
+    D_optimizer = optimizer()
+
+    ra_gen = utils.RelativisticAverageLoss(discriminator, type_="G")
+    ra_disc = utils.RelativisticAverageLoss(discriminator, type_="D")
+
+    # The weights of generator trained during Phase #1
+    # is used to initialize or "hot start" the generator
+    # for phase #2 of training
+    status = None
+    checkpoint = tf.train.Checkpoint(
+        G=generator,
+        G_optimizer=G_optimizer,
+        D=discriminator,
+        D_optimizer=D_optimizer)
+    if not tf.io.gfile.exists(
+        os.path.join(
+            self.model_dir,
+            self.settings["checkpoint_path"]["phase_2"],
+            "checkpoint")):
+      hot_start = tf.train.Checkpoint(
+          G=generator,
+          G_optimizer=G_optimizer)
+      status = utils.load_checkpoint(hot_start, "phase_1", self.model_dir)
+      # consuming variable from checkpoint
+      G_optimizer.learning_rate.assign(phase_args["adam"]["initial_lr"])
+    else:
+      status = utils.load_checkpoint(checkpoint, "phase_2", self.model_dir)
+
+    logging.debug("phase status object: {}".format(status))
+
+    gen_metric = tf.keras.metrics.Mean()
+    disc_metric = tf.keras.metrics.Mean()
+    psnr_metric = tf.keras.metrics.Mean()
+
+    # May need to create a Perceptual Model for Optical Depth?
+    # logging.debug("Loading Perceptual Model")
+    # perceptual_loss = utils.PerceptualLoss(
+    #     weights="imagenet",
+    #     input_shape=[hr_dimension, hr_dimension, 1],
+    #     loss_type=phase_args["perceptual_loss_type"])
+    # logging.debug("Loaded Model")
+
+    def _step_fn(image_lr, image_hr):
+      logging.debug("Starting Distributed Step")
+      with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
+        fake = generator.unsigned_call(image_lr)
+        logging.debug("Fetched Generator Fake")
+        fake = utils.preprocess_input(fake)
+        image_lr = utils.preprocess_input(image_lr)
+        image_hr = utils.preprocess_input(image_hr)
+        # percep_loss = tf.reduce_mean(perceptual_loss(image_hr, fake))
+        # logging.debug("Calculated Perceptual Loss")
+        l1_loss = utils.pixel_loss(image_hr, fake)
+        logging.debug("Calculated Pixel Loss")
+        loss_RaG = ra_gen(image_hr, fake)
+        logging.debug("Calculated Relativistic"
+                      "Averate (RA) Loss for Generator")
+        disc_loss = ra_disc(image_hr, fake)
+        logging.debug("Calculated RA Loss Discriminator")
+        # gen_loss = percep_loss + lambda_ * loss_RaG + eta * l1_loss
+        gen_loss = lambda_ * loss_RaG + eta * l1_loss
+        logging.debug("Calculated Generator Loss")
+        disc_metric(disc_loss)
+        gen_metric(gen_loss)
+        gen_loss = gen_loss * (1.0 / self.batch_size)
+        disc_loss = disc_loss * (1.0 / self.batch_size)
+        psnr_metric(tf.reduce_mean(tf.image.psnr(fake, image_hr, max_val=256.0)))
+
+      disc_grad = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
+      logging.debug("Calculated gradient for Discriminator")
+
+      D_optimizer.apply_gradients(zip(disc_grad, discriminator.trainable_variables))
+      logging.debug("Applied gradients to Discriminator")
+
+      gen_grad = gen_tape.gradient(gen_loss, generator.trainable_variables)
+      logging.debug("Calculated gradient for Generator")
+
+      G_optimizer.apply_gradients(zip(gen_grad, generator.trainable_variables))
+      logging.debug("Applied gradients to Generator")
+
+      return tf.cast(D_optimizer.iterations, tf.float32)
+
+    @tf.function
+    def train_step(image_lr, image_hr):
+      # distributed_iterations = self.strategy.experimental_run_v2(step_fn, args=(image_lr, image_hr))
+      distributed_iterations = _step_fn(image_lr, image_hr)
+      # num_steps = self.strategy.reduce(tf.distribute.ReduceOp.MEAN, distributed_iterations, axis=None)
+      # return num_steps
+      return distributed_iterations
+    start = time.time()
+    last_psnr = 0
+
+    for image_lr, image_hr in self.dataset:
+      print(image_lr.shape, image_hr.shape)
+      num_step = train_step(image_lr, image_hr)
+      if num_step >= total_steps:
+        return
+      if status:
+        status.assert_consumed()
+        logging.info("consumed checkpoint successfully!")
+        status = None
+
+      # Decaying Learning Rate
+      for _step in decay_steps.copy():
+        if num_step >= _step:
+          decay_steps.pop(0)
+          g_current_lr = self.strategy.reduce(
+              tf.distribute.ReduceOp.MEAN,
+              G_optimizer.learning_rate, axis=None)
+
+          d_current_lr = self.strategy.reduce(
+              tf.distribute.ReduceOp.MEAN,
+              D_optimizer.learning_rate, axis=None)
+
+          logging.debug(
+              "Current LR: G = %s, D = %s" %
+              (g_current_lr, d_current_lr))
+          logging.debug(
+              "[Phase 2] Decayed Learing Rate by %f." % decay_factor)
+          G_optimizer.learning_rate.assign(G_optimizer.learning_rate * decay_factor)
+          D_optimizer.learning_rate.assign(D_optimizer.learning_rate * decay_factor)
+
+      # Writing Summary
+      with self.summary_writer_2.as_default():
+        tf.summary.scalar(
+            "gen_loss", gen_metric.result(), step=D_optimizer.iterations)
+        tf.summary.scalar(
+            "disc_loss", disc_metric.result(), step=D_optimizer.iterations)
+        tf.summary.scalar("mean_psnr", psnr_metric.result(), step=D_optimizer.iterations)
+
+      # Logging and Checkpointing
+      # if not num_step % self.settings["print_step"]: # testing
+      if True:
+        logging.info(
+            "Step: {}\tGen Loss: {}\tDisc Loss: {}"
+            "\tPSNR: {}\tTime Taken: {} sec".format(
+                num_step,
+                gen_metric.result(),
+                disc_metric.result(),
+                psnr_metric.result(),
+                time.time() - start))
+        # if psnr_metric.result() > last_psnr:
+        last_psnr = psnr_metric.result()
+        utils.save_checkpoint(checkpoint, "phase_2", self.model_dir)
+        start = time.time()
diff --git a/modules/GSOC/E2_ESRGAN/lib/utils.py b/modules/GSOC/E2_ESRGAN/lib/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2c497b297e54a9af0df48aa4221d5fa102cb719
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/lib/utils.py
@@ -0,0 +1,302 @@
+import os
+from functools import partial
+import tensorflow as tf
+# from absl import logging
+import logging
+from GSOC.E2_ESRGAN.lib import settings
+
+""" Utility functions needed for training ESRGAN model. """
+
+# Checkpoint Utilities
+
+
+def save_checkpoint(checkpoint, training_phase, basepath=""):
+  """ Saves checkpoint.
+      Args:
+        checkpoint: tf.train.Checkpoint object
+        training_phase: The training phase of the model to load/store the checkpoint for.
+                        Can be one of "phase_1" or "phase_2".
+        basepath: Base path to store checkpoints to.
+  """
+  dir_ = settings.Settings()["checkpoint_path"][training_phase]
+  if basepath:
+    dir_ = os.path.join(basepath, dir_)
+  dir_ = os.path.join(dir_, os.path.basename(dir_))
+  checkpoint.save(file_prefix=dir_)
+  logging.debug("Prefix: %s. checkpoint saved successfully!" % dir_)
+
+
+def load_checkpoint(checkpoint, training_phase, basepath=""):
+  """ Saves checkpoint.
+      Args:
+        checkpoint: tf.train.Checkpoint object
+        training_phase: The training phase of the model to load/store the checkpoint for.
+                        can be one of the two "phase_1" or "phase_2"
+        basepath: Base Path to load checkpoints from.
+  """
+  logging.info("Loading check point for: %s" % training_phase)
+  dir_ = settings.Settings()["checkpoint_path"][training_phase]
+  if basepath:
+    dir_ = os.path.join(basepath, dir_)
+  if tf.io.gfile.exists(os.path.join(dir_, "checkpoint")):
+    logging.info("Found checkpoint at: %s" % dir_)
+    status = checkpoint.restore(tf.train.latest_checkpoint(dir_))
+    return status
+
+# Network Interpolation utility
+
+
+def interpolate_generator(
+    generator_fn,
+    discriminator,
+    alpha,
+    dimension,
+    factor=4,
+    basepath=""):
+  """ Interpolates between the weights of the PSNR model and GAN model
+
+       Refer to Section 3.4 of https://arxiv.org/pdf/1809.00219.pdf (Xintao et al.)
+
+       Args:
+         generator_fn: function that returns the Keras model of the generator.
+         discriminator: Keras model of the discriminator.
+         alpha: interpolation parameter between the weights of the two models.
+         dimension: dimension of the high resolution image
+         factor: scale factor of the model
+         basepath: Base directory to load checkpoints from.
+       Returns:
+         Keras model of a generator with weights interpolated between the PSNR and GAN model.
+  """
+  assert 0 <= alpha <= 1
+  size = dimension
+  if not tf.nest.is_nested(dimension):
+    size = [dimension, dimension]
+  logging.debug("Interpolating generator. Alpha: %f" % alpha)
+  optimizer = partial(tf.keras.optimizers.Adam)
+  gan_generator = generator_fn()
+  psnr_generator = generator_fn()
+  # building generators
+  gan_generator(tf.random.normal(
+      [1, size[0] // factor, size[1] // factor, 1]))
+  psnr_generator(tf.random.normal(
+      [1, size[0] // factor, size[1] // factor, 1]))
+
+  phase_1_ckpt = tf.train.Checkpoint(
+      G=psnr_generator, G_optimizer=optimizer())
+  phase_2_ckpt = tf.train.Checkpoint(
+      G=gan_generator,
+      G_optimizer=optimizer(),
+      D=discriminator,
+      D_optimizer=optimizer())
+
+  load_checkpoint(phase_1_ckpt, "phase_1", basepath)
+  load_checkpoint(phase_2_ckpt, "phase_2", basepath)
+
+  # Consuming Checkpoint
+  logging.debug("Consuming Variables: Adervsarial generator") 
+  gan_generator.unsigned_call(tf.random.normal(
+      [1, size[0] // factor, size[1] // factor, 1]))
+  input_layer = tf.keras.Input(shape=[None, None, 1], name="input_0")
+  output = gan_generator(input_layer)
+  gan_generator = tf.keras.Model(
+      inputs=[input_layer],
+      outputs=[output])
+
+  logging.debug("Consuming Variables: PSNR generator")
+  psnr_generator.unsigned_call(tf.random.normal(
+      [1, size[0] // factor, size[1] // factor, 1]))
+
+  for variables_1, variables_2 in zip(
+          gan_generator.trainable_variables, psnr_generator.trainable_variables):
+    variables_1.assign((1 - alpha) * variables_2 + alpha * variables_1)
+
+  return gan_generator
+
+# Losses
+
+
+def preprocess_input(image):
+  image = image[..., ::-1]
+  # mean = -tf.constant([103.939, 116.779, 123.68])
+  mean = -tf.constant([0.0])
+  return tf.nn.bias_add(image, mean)
+
+
+def PerceptualLoss(weights=None, input_shape=None, loss_type="L1"):
+  """ Perceptual Loss using VGG19
+      Args:
+        weights: Weights to be loaded.
+        input_shape: Shape of input image.
+        loss_type: Loss type for features. (L1 / L2)
+  """
+  vgg_model = tf.keras.applications.vgg19.VGG19(
+      input_shape=input_shape, weights=weights, include_top=False)
+  for layer in vgg_model.layers:
+    layer.trainable = False
+  # Removing Activation Function
+  vgg_model.get_layer("block5_conv4").activation = lambda x: x
+  phi = tf.keras.Model(
+      inputs=[vgg_model.input],
+      outputs=[
+          vgg_model.get_layer("block5_conv4").output])
+
+  def loss(y_true, y_pred):
+    if loss_type.lower() == "l1":
+      return tf.compat.v1.losses.absolute_difference(phi(y_true), phi(y_pred))
+
+    if loss_type.lower() == "l2":
+      return tf.reduce_mean(
+          tf.reduce_mean(
+              (phi(y_true) - phi(y_pred))**2,
+              axis=0))
+    raise ValueError(
+        "Loss Function: \"%s\" not defined for Perceptual Loss" %
+        loss_type)
+  return loss
+
+
+def pixel_loss(y_true, y_pred):
+  y_true = tf.cast(y_true, tf.float32)
+  y_pred = tf.cast(y_pred, tf.float32)
+  return tf.reduce_mean(tf.reduce_mean(tf.abs(y_true - y_pred), axis=0))
+
+
+def RelativisticAverageLoss(non_transformed_disc, type_="G"):
+  """ Relativistic Average Loss based on RaGAN
+      Args:
+      non_transformed_disc: non activated discriminator Model
+      type_: type of Ra loss to produce.
+             'G': Relativistic average loss for generator
+             'D': Relativistic average loss for discriminator
+  """
+  loss = None
+
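+  # D_Ra(x, y) = C(x) - E[C(y)]: the relativistic average logit from
+  # RaGAN (Jolicoeur-Martineau, 2018, https://arxiv.org/abs/1807.00734).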
+  def D_Ra(x, y):
+    return non_transformed_disc(x) - tf.reduce_mean(non_transformed_disc(y))
+
+  def loss_D(y_true, y_pred):
+    """
+      Relativistic Average Loss for Discriminator
+      Args:
+        y_true: Real Image
+        y_pred: Generated Image
+    """
+    real_logits = D_Ra(y_true, y_pred)
+    fake_logits = D_Ra(y_pred, y_true)
+    real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
+        labels=tf.ones_like(real_logits), logits=real_logits))
+    fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
+        labels=tf.zeros_like(fake_logits), logits=fake_logits))
+    return real_loss + fake_loss
+
+  def loss_G(y_true, y_pred):
+    """
+     Relativistic Average Loss for Generator
+     Args:
+       y_true: Real Image
+       y_pred: Generated Image
+    """
+    real_logits = D_Ra(y_true, y_pred)
+    fake_logits = D_Ra(y_pred, y_true)
+    real_loss = tf.nn.sigmoid_cross_entropy_with_logits(
+        labels=tf.zeros_like(real_logits), logits=real_logits)
+    fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(
+        labels=tf.ones_like(fake_logits), logits=fake_logits)
+    return real_loss + fake_loss
+  if type_ == "G":
+    loss = loss_G
+  elif type_ == "D":
+    loss = loss_D
+  return loss
+
+
+# Strategy Utils
+
+def assign_to_worker(use_tpu):
+  if use_tpu:
+    return "/job:worker"
+  return ""
+
+
+class SingleDeviceStrategy(object):
+  """ Dummy Strategy when Outside TPU """
+
+  def __enter__(self, *args, **kwargs):
+    pass
+
+  def __exit__(self, *args, **kwargs):
+    pass
+
+  def experimental_distribute_dataset(self, dataset, *args, **kwargs):
+    return dataset
+
+  def experimental_run_v2(self, fn, args, kwargs):
+    return fn(*args, **kwargs)
+
+  def reduce(self, reduction_type, distributed_data, axis):
+    return distributed_data
+
+  def scope(self):
+    return self
+
+
+# Model Utils
+
+
+class RDB(tf.keras.layers.Layer):
+  """ Residual Dense Block Layer """
+
+  def __init__(self, out_features=32, bias=True, first_call=True):
+    super(RDB, self).__init__()
+    _create_conv2d = partial(
+        tf.keras.layers.Conv2D,
+        out_features,
+        kernel_size=[3, 3],
+        kernel_initializer="he_normal",
+        bias_initializer="zeros",
+        strides=[1, 1], padding="same", use_bias=bias)
+    self._conv2d_layers = {
+        "conv_1": _create_conv2d(),
+        "conv_2": _create_conv2d(),
+        "conv_3": _create_conv2d(),
+        "conv_4": _create_conv2d(),
+        "conv_5": _create_conv2d()}
+    self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+    self._beta = settings.Settings()["RDB"].get("residual_scale_beta", 0.2)
+    self._first_call = first_call
+
+  def call(self, input_):
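+    # Dense connections: each convolution sees the concatenation of the
+    # input and all previously produced feature maps.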
+    x1 = self._lrelu(self._conv2d_layers["conv_1"](input_))
+    x2 = self._lrelu(self._conv2d_layers["conv_2"](
+        tf.concat([input_, x1], -1)))
+    x3 = self._lrelu(self._conv2d_layers["conv_3"](
+        tf.concat([input_, x1, x2], -1)))
+    x4 = self._lrelu(self._conv2d_layers["conv_4"](
+        tf.concat([input_, x1, x2, x3], -1)))
+    x5 = self._conv2d_layers["conv_5"](tf.concat([input_, x1, x2, x3, x4], -1))
+    if self._first_call:
+      logging.debug("Initializing with MSRA")
+      for _, layer in self._conv2d_layers.items():
+        for variable in layer.trainable_variables:
+          variable.assign(0.1 * variable)
+      self._first_call = False
+    return input_ + self._beta * x5
+
+
+class RRDB(tf.keras.layers.Layer):
+  """ Residual in Residual Block Layer """
+
+  def __init__(self, out_features=32, first_call=True):
+    super(RRDB, self).__init__()
+    self.RDB1 = RDB(out_features, first_call=first_call)
+    self.RDB2 = RDB(out_features, first_call=first_call)
+    self.RDB3 = RDB(out_features, first_call=first_call)
+    self.beta = settings.Settings()["RDB"].get("residual_scale_beta", 0.2)
+
+  def call(self, input_):
+    out = self.RDB1(input_)
+    out = self.RDB2(out)
+    out = self.RDB3(out)
+    return input_ + self.beta * out
diff --git a/modules/GSOC/E2_ESRGAN/main.py b/modules/GSOC/E2_ESRGAN/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..d49e4fbdf7546860fb83fe923c44212a6d415c13
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/main.py
@@ -0,0 +1,169 @@
+import os
+from functools import partial
+import argparse
+# from absl import logging
+import logging
+from GSOC.E2_ESRGAN.lib import settings, train, model, utils
+# from tensorflow.python.eager import profiler
+# import tensorflow.compat.v2 as tf
+# tf.enable_v2_behavior()
+import tensorflow as tf
+# tf.compat.v1.disable_v2_behavior()  # would break the TF2 code below
+
+logging.basicConfig(filename='/Users/tomrink/esrgan_log.txt', level=logging.DEBUG)
+
+
+""" Enhanced Super Resolution GAN.
+    Citation:
+      @article{DBLP:journals/corr/abs-1809-00219,
+        author    = {Xintao Wang and
+                     Ke Yu and
+                     Shixiang Wu and
+                     Jinjin Gu and
+                     Yihao Liu and
+                     Chao Dong and
+                     Chen Change Loy and
+                     Yu Qiao and
+                     Xiaoou Tang},
+        title     = {{ESRGAN:} Enhanced Super-Resolution Generative Adversarial Networks},
+        journal   = {CoRR},
+        volume    = {abs/1809.00219},
+        year      = {2018},
+        url       = {http://arxiv.org/abs/1809.00219},
+        archivePrefix = {arXiv},
+        eprint    = {1809.00219},
+        timestamp = {Fri, 05 Oct 2018 11:34:52 +0200},
+        biburl    = {https://dblp.org/rec/bib/journals/corr/abs-1809-00219},
+        bibsource = {dblp computer science bibliography, https://dblp.org}
+      }
+"""
+
+
+#def main(**kwargs):
+def run(kwargs):
+  """ Main function for training ESRGAN model and exporting it as a SavedModel2.0
+      Args:
+        config: path to config yaml file.
+        log_dir: directory to store summary for tensorboard.
+        data_dir: directory to store / access the dataset.
+        manual: boolean to denote if data_dir is a manual directory.
+        model_dir: directory to store the model into.
+  """
+
+  for physical_device in tf.config.experimental.list_physical_devices("GPU"):
+    tf.config.experimental.set_memory_growth(physical_device, True)
+
+  strategy = utils.SingleDeviceStrategy()
+  #scope = utils.assign_to_worker(kwargs["tpu"])
+  sett = settings.Settings(kwargs["config"])
+  Stats = settings.Stats(os.path.join(sett.path, "stats.yaml"))
+  tf.random.set_seed(10)
+  # if kwargs["tpu"]:
+  #   cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
+  #       kwargs["tpu"])
+  #   tf.config.experimental_connect_to_host(cluster_resolver.get_master())
+  #   tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
+  #   strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
+  # with tf.device(scope), strategy.scope():
+  with strategy.scope():
+    summary_writer_1 = tf.summary.create_file_writer(os.path.join(kwargs["log_dir"], "phase1"))
+    summary_writer_2 = tf.summary.create_file_writer(os.path.join(kwargs["log_dir"], "phase2"))
+
+    # profiler.start_profiler_server(6009)
+    discriminator = model.VGGArch(batch_size=sett["batch_size"], num_features=64)
+
+    if not kwargs["export_only"]:
+      generator = model.RRDBNet(out_channel=1)
+      logging.debug("Initiating Convolutions")
+      generator.unsigned_call(tf.random.normal([1, 128, 128, 1]))
+      training = train.Trainer(
+          summary_writer=summary_writer_1,
+          summary_writer_2=summary_writer_2,
+          settings=sett,
+          model_dir=kwargs["model_dir"],
+          data_dir=kwargs["data_dir"],
+          manual=kwargs["manual"],
+          strategy=strategy)
+      phases = list(map(lambda x: x.strip(), kwargs["phases"].lower().split("_")))
+      if not Stats["train_step_1"] and "phase1" in phases:
+        logging.info("starting phase 1")
+        training.warmup_generator(generator)
+        Stats["train_step_1"] = True
+      if not Stats["train_step_2"] and "phase2" in phases:
+        logging.info("starting phase 2")
+        training.train_gan(generator, discriminator)
+        Stats["train_step_2"] = True
+
+  if Stats["train_step_1"] and Stats["train_step_2"]:
+    # Attempting to save "Interpolated" Model as SavedModel2.0
+    interpolated_generator = utils.interpolate_generator(
+        partial(model.RRDBNet, out_channel=1, first_call=False),
+        discriminator,
+        sett["interpolation_parameter"],
+        [720, 1080],
+        basepath=kwargs["model_dir"])
+
+    tf.saved_model.save(
+        interpolated_generator, os.path.join(
+            kwargs["model_dir"], "esrgan"))
+
+
+#if __name__ == '__main__':
+parser = argparse.ArgumentParser()
+parser.add_argument(
+  "--config",
+  default="config/config.yaml",
+  help="path to configuration file. (default: %(default)s)")
+parser.add_argument(
+  "--data_dir",
+  default=None,
+  help="directory to put the data. (default: %(default)s)")
+parser.add_argument(
+  "--manual",
+  default=False,
+  help="specify if data_dir is a manual directory. (default: %(default)s)",
+  action="store_true")
+parser.add_argument(
+  "--model_dir",
+  default='/Users/tomrink/tf_model_esrgan',
+  help="directory to put the model in.")
+parser.add_argument(
+  "--log_dir",
+  default='/Users/tomrink/esrgan_log',
+  help="directory to story summaries for tensorboard.")
+parser.add_argument(
+  "--phases",
+  default="phase1_phase2",
+  help="phases to train for seperated by '_'")
+parser.add_argument(
+  "--export_only",
+  default=False,
+  action="store_true",
+  help="Exports to SavedModel")
+parser.add_argument(
+  "--tpu",
+  default="",
+  help="Name of the TPU to be used")
+parser.add_argument(
+  "-v",
+  "--verbose",
+  action="count",
+  default=0,
+  help="each 'v' increases vebosity of logging.")
+# FLAGS, unparsed = parser.parse_known_args()
+
+FLAGS = {'config': 'config/config.yaml',
+         'data_dir': None,
+         'manual': False,
+         'model_dir': '/Users/tomrink/tf_model_esrgan/',
+         'log_dir': '/Users/tomrink/tf_logs_esrgan/',
+         'phases': 'phase1_phase2',
+         'export_only': False,
+         'tpu': '',
+         'verbose': 0}
+
+log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
+log_level = log_levels[min(FLAGS['verbose'], len(log_levels) - 1)]
+# logging.set_verbosity(log_level)
+# FLAGS = vars(FLAGS)
+# main(**vars(FLAGS))
diff --git a/modules/GSOC/E2_ESRGAN/train.sh b/modules/GSOC/E2_ESRGAN/train.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1c46613efab4212ca060bbd7aa96b944f3d6cbaa
--- /dev/null
+++ b/modules/GSOC/E2_ESRGAN/train.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+function loading() {
+  wheel=("-" "\\" "|" "/")
+  for i in ${wheel[*]}
+    do 
+      printf "\r[$i] $1"; 
+      sleep 0.1;
+    done;
+}
+function wait_for_process() {
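+  # Spin the loading indicator while PID $1 is alive, then return its exit status.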
+  while [[ `ps aux | grep $1 | wc -l` -gt 1 ]]
+  do
+    loading "$2"
+  done
+  wait $1
+  return "$?"
+}
+function report_error(){
+[[ $1 -ne 0 ]] && printf "\r\033[K[X] Error occurred! Check $2" && exit 1 || rm -f "$2"
+}
+
+# Script Starts Here
+
+# Param 1: Base Directory
+# Param 2: Phases to Work on
+# Param 3: coco2014 / Extract Location of the Dataset
+
+[[ ${#1} -gt 0 ]] && BASEDIR="$1" || BASEDIR="$HOME"
+[[ ${#2} -gt 0 ]] && PHASES="$2" || PHASES="phase1_phase2"
+[[ "${3,,}" == "coco2014" ]] || EXTRACTDIR="$3"
+
+python3 -c "import tensorflow" &> /dev/null &
+wait_for_process $! "Checking Tensorflow Installation"
+report_error $? "Tensorflow Installation"
+
+pushd "$BASEDIR"
+python3 -c "import tensorflow_datasets" &>/dev/null &
+wait_for_process $! "Checking TFDS Installation"
+if [[ $? -ne 0 ]]
+then
+	pip3 -q install -U tfds-nightly &> pip.log &
+	wait_for_process $! "Installing Tensorflow Datasets"
+	report_error $? "pip.log"
+	printf "\r\033[K[-] Installed Tensorflow Datasets\n"
+fi
+
+python3 -c "import tensorflow_hub" &>/dev/null &
+wait_for_process $! "Checking TF-Hub Installation"
+if [[ $? -ne 0 ]]
+then
+	pip3 -q install -U tf-hub-nightly &> pip.log &
+	wait_for_process $! "Installing Tensorflow Hub"
+	report_error $? "pip.log"
+	printf "\r\033[K[-] Installed Tensorflow Hub\n"
+fi
+
+if [[ ${#EXTRACTDIR} -eq 0 ]]
+then
+  DATASET="gs://images.cocodataset.org/train2014"
+  EXTRACTDIR="$BASEDIR/datadir/coco2014/train/none"
+fi
+IFS=/ read -ra ds <<<"$EXTRACTDIR"
+DATADIR=$(dirname $(dirname $(dirname $EXTRACTDIR)))
+CODE="import os;
+  import tensorflow_datasets as tfds;
+  dl_config = tfds.download.DownloadConfig(manual_dir=os.path.expanduser('$DATADIR'));
+  ds = tfds.load('image_label_folder/dataset_name=${ds[-3],,}',
+    split='train',
+    as_supervised=True,
+    download_and_prepare_kwargs={'download_config':dl_config});"
+
+CODE=$(echo $CODE | sed -r s/"[[:space:]]*(;|,|=)[[:space:]]*"/"\1"/g)
+
+if [ ! -d "$EXTRACTDIR" ]
+then
+  if [[ ${#DATASET} -gt 0 ]]
+  then
+    mkdir -p "$EXTRACTDIR"
+    printf " [*] Downloading and Extracting Images from: $DATASET"
+    gsutil -m rsync $DATASET $EXTRACTDIR &>download.log &
+    wait_for_process $! "Downloading and Extracting Images from: $DATASET"
+    report_error $? "download.log"
+    printf "\r\033[K[-] Done Downloading and Extracting.\n" 
+  else
+    echo "Data Directory Doesn't Exist! Exiting..." && exit 1
+  fi
+fi
+python3 -u -c "$CODE" &>parse.log &
+wait_for_process $! "Parsing Dataset to TF Records."
+report_error $? "parse.log"
+printf "\r\033[K[-] Done Parsing to TF Records\n"
+
+# Creating Log and  Model Dump Directories"
+[ ! -d "logdir" ] && mkdir logdir
+[ ! -d "modeldir" ] && mkdir modeldir
+
+if [ $(ps aux|grep tensorboard|wc -l) -le 1 ]
+then
+	sudo tensorboard --logdir "$BASEDIR/logdir" \
+		--port 80 &> "$BASEDIR/logdir/access.log" &
+fi
+
+if [ ! -d "GSOC" ]
+then
+	git clone https://github.com/captain-pool/GSOC
+fi
+pushd "$BASEDIR/GSOC/E2_ESRGAN"
+
+rm -f cache/*.lockfile
+
+python3 -u main.py --data_dir "$DATADIR" \
+	--model_dir "$BASEDIR/modeldir" \
+	--manual --log_dir "$BASEDIR/logdir" \
+	--phases $PHASES -vvv&>"$BASEDIR/logdir/main.log" &
+popd
+
+TB_PID=$(IFS=' ' pgrep -u root -f tensorboard)
+TB_PID=$(printf ",%s" "${TB_PID[@]}")
+echo "[-] Log of training code: $BASEDIR/logdir/main.log"
+echo "[-] Logs of tensorboard: $BASEDIR/logdir/access.log"
+printf "\033[1mPIDs of created jobs\033[0m\n"
+echo "[-] PID Tensorboard (root): "${TB_PID:1}
+PY_PID=$(pgrep -u `whoami` -f python3)
+PY_PID=$(printf ",%s" "${PY_PID[@]}")
+echo "[-] PID Python3 ($(whoami)): "${PY_PID:1}
diff --git a/modules/GSOC/E3_Distill_ESRGAN/README.md b/modules/GSOC/E3_Distill_ESRGAN/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b84ba6bef96f8849caf0c03d889cb5b7bef41e89
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/README.md
@@ -0,0 +1,74 @@
+# GAN Distillation on Enhanced Super Resolution GAN
+Link to ESRGAN Training Code: [Click Here](../E2_ESRGAN)
+
+Knowledge Distillation on Enhanced Super Resolution GAN to perform Super Resolution
+with a model having a much smaller number of variables.
+The training algorithm is **inspired** by https://arxiv.org/abs/1902.00159, with a custom loss function specific to the
+problem of image super resolution.
+
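+The first ("comparative") stage trains the student to reproduce the teacher's
+output directly. A minimal sketch of that step is shown below; `teacher`,
+`student` and `optimizer` are illustrative stand-ins, not the exact objects
+built in `libs/train.py`:
+
+```python3
+import tensorflow as tf
+
+mse = tf.keras.losses.MeanSquaredError()
+
+@tf.function
+def distill_step(image_lr):
+  teacher_sr = teacher(image_lr)        # frozen teacher prediction
+  with tf.GradientTape() as tape:
+    student_sr = student(image_lr)
+    loss = mse(teacher_sr, student_sr)  # match the teacher's output
+  gradients = tape.gradient(loss, student.trainable_variables)
+  optimizer.apply_gradients(zip(gradients, student.trainable_variables))
+  return loss
+```
+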
+Results
+------------------
+
+### ESRGAN
+**Latency**: 17.117 Seconds
+
+**Mean PSNR Achieved**: 28.2
+
+**Sample**:
+
+Input Image Shape: 180 x 320
+
+Output Image Shape: 720 x 1280
+
+![esrgan](https://user-images.githubusercontent.com/13994201/63640629-251a1e80-c6c0-11e9-98bc-04432c7064e2.jpg "ESRGAN")
+
+**PSNR of the Image**: 30.462
+
+### Compressed ESRGAN
+**Latency**: _**0.4 Seconds**_
+
+**Mean PSNR Achieved**: 25.3
+
+**Sample**
+
+Input Image Shape: 180 x 320
+
+Output Image Shape: 720 x 1280
+
+![compressed_esrgan](https://user-images.githubusercontent.com/13994201/63640526-1121ed00-c6bf-11e9-99f5-0b48069fe784.jpg "Compressed ESRGAN")
+
+**PSNR of the Image**: 26.942
+
+Student Model
+----------------
+The Residual in Residual architecture of ESRGAN was followed, with a much shallower trunk.
+Specifically,
+
+|Name of Node|Depth|
+|:-:|:-:|
+|Residual Dense Blocks (RDB)|2 Depthwise Convolutions|
+|Residual in Residual Blocks (RRDB)|2 RDB units|
+|Trunk|3 RRDB units|
+|UpSample Layer|1 ConvTranspose unit with a stride length of 4|
+
+**Size of Growth Channel (intermediate channel) used:** 32
+
+Trained Saved Model and TF Lite File
+-----------------------------------------
+
+#### Specification of the Saved Model
+Input Dimension: `[None, 180, 320, 3]`
+
+Input Data Type: `Float32`
+
+Output Dimension: `[None, 720, 1280, 3]`
+
+TensorFlow loadable link: https://github.com/captain-pool/GSOC/releases/download/2.0.0/compressed_esrgan.tar.gz
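+
+For example, the SavedModel can be loaded and run as follows (a minimal sketch;
+it assumes the archive above has been downloaded and extracted to
+`./compressed_esrgan`):
+
+```python3
+import tensorflow as tf
+
+model = tf.saved_model.load("compressed_esrgan")
+# Placeholder batch standing in for a real low-resolution image.
+lr_batch = tf.random.normal([1, 180, 320, 3])
+sr_batch = model(lr_batch)  # expected shape: [1, 720, 1280, 3]
+```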
+
+#### Specification of TF Lite
+Input Dimension: `[1, 180, 320, 3]`
+
+Input Data Type: `Float32`
+
+Output Dimension: `[1, 720, 1280, 3]`
+
+TensorFlow Lite: https://github.com/captain-pool/GSOC/releases/download/2.0.0/compressed_esrgan.tflite
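+
+A minimal inference sketch for the TFLite file (assumes `compressed_esrgan.tflite`
+has been downloaded to the working directory):
+
+```python3
+import numpy as np
+import tensorflow as tf
+
+interpreter = tf.lite.Interpreter(model_path="compressed_esrgan.tflite")
+interpreter.allocate_tensors()
+input_index = interpreter.get_input_details()[0]["index"]
+output_index = interpreter.get_output_details()[0]["index"]
+
+# Placeholder input standing in for a real low-resolution image.
+lr_image = np.random.rand(1, 180, 320, 3).astype(np.float32)
+interpreter.set_tensor(input_index, lr_image)
+interpreter.invoke()
+sr_image = interpreter.get_tensor(output_index)  # shape: [1, 720, 1280, 3]
+```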
diff --git a/modules/GSOC/E3_Distill_ESRGAN/config/config.yaml b/modules/GSOC/E3_Distill_ESRGAN/config/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..86959f207b1059380e535b7b3114d9a7a4fbd703
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/config/config.yaml
@@ -0,0 +1,52 @@
+# Config File for Distillation
+teacher_directory: "../E2_ESRGAN"
+teacher_config: "../E2_ESRGAN/config/config.yaml"
+checkpoint_path:
+  comparative_checkpoint: "checkpoints/mse_checkpoints/"
+  adversarial_checkpoint: "checkpoints/adversarial_checkpoints/"
+
+hr_size: [512, 512, 3]
+train:
+  comparative:
+    print_step: 1000
+    num_steps: 200000
+    checkpoint_step: 2000
+    decay_rate: 0.5
+    decay_steps:
+      - 20000
+      - 50000
+      - 90000
+  adversarial:
+    initial_lr: !!float 5e-5
+    num_steps: 400000
+    print_step: 1000
+    checkpoint_step: 2000
+    decay_rate: 0.5
+    decay_steps:
+      - 50000
+      - 100000
+      - 200000
+    lambda: !!float 5e-2
+    alpha: !!float 1e-4
+
+scale_factor: 1
+scale_value: 4
+student_network: "rrdb_student"
+student_config:
+  vgg_student:
+    trunk_depth: 3 # Minimum 2
+    # growth_channels: 32
+    use_bias: True
+  residual_student:
+    trunk_depth: 2
+    use_bias: True
+    residual_scale_beta: 0.8
+  rrdb_student:
+    rdb_config:
+      depth: 3
+      residual_scale_beta: 0.2
+    rrdb_config:
+      rdb_units: 2
+      residual_scale_beta: 0.2
+    trunk_size: 3
+    growth_channels: 32
diff --git a/modules/GSOC/E3_Distill_ESRGAN/export_tflite.py b/modules/GSOC/E3_Distill_ESRGAN/export_tflite.py
new file mode 100644
index 0000000000000000000000000000000000000000..e237bbb55786f6064f79e3671b8302309bdc4641
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/export_tflite.py
@@ -0,0 +1,151 @@
+
+""" Module for exporting TFLite of the Student Model """
+
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import division
+
+
+import os
+import argparse
+from tqdm import tqdm
+from absl import logging
+import tensorflow as tf
+import numpy as np
+from libs import settings
+from libs import model
+from libs import lazy_loader
+from PIL import Image
+
+
+def representative_dataset_gen(
+    num_calibration_steps,
+    datadir,
+    hr_size,
+    tflite_size):
+  """ Creates Generator for Representative datasets for Quantizing TFLite
+      Args:
+        num_calibration_steps: Number of steps to calibrate the model for.
+        datadir: Directory containing TFRecord dataset.
+        hr_size: [height, width, channel] for high resolution images.
+        tflite_size: [height, width] for the TFLite File.
+  """
+  hr_size = tf.cast(tf.convert_to_tensor(hr_size), tf.float32)
+  lr_size = tf.cast(
+      hr_size * tf.convert_to_tensor([1. / 4, 1. / 4, 1]), tf.int32)
+  hr_size = tf.cast(hr_size, tf.int32)
+  # Loading TFRecord Dataset
+  ds = (dataset.load_dataset(
+      datadir,
+      lr_size=lr_size,
+      hr_size=hr_size)
+      .take(num_calibration_steps)
+      .prefetch(10))
+
+  def _gen_fn():
+    for _, image_hr in tqdm(ds, total=num_calibration_steps):
+      image_hr = tf.cast(image_hr, tf.uint8)
+      lr_image = np.asarray(
+          Image.fromarray(image_hr.numpy())
+          .resize([tflite_size[1], tflite_size[0]],
+                  Image.BICUBIC))
+      yield [tf.expand_dims(tf.cast(lr_image, tf.float32), 0).numpy()]
+
+  return _gen_fn
+
+
+def export_tflite(config="", modeldir="", mode="", **kwargs):
+  """
+    Exports SavedModel(if not present) TFLite of the student generator.
+    Args:
+      config: Path to config file of the student.
+      modeldir: Path to export the SavedModel and the TFLite to.
+      mode: Mode of training to export. (adversarial / comparative)
+  """
+  # TODO (@captain-pool): Fix Quantization and mention them in the args list.
+
+  lazy = lazy_loader.LazyLoader()
+  lazy.import_("teacher_imports", parent="libs", return_=False)
+  lazy.import_("utils", parent="libs", return_=False)
+  lazy.import_("dataset", parent="libs", return_=False)
+  globals().update(lazy.import_dict)
+  status = None
+  sett = settings.Settings(config, use_student_settings=True)
+  stats = settings.Stats(os.path.join(sett.path, "stats.yaml"))
+  student_name = sett["student_network"]
+  student_generator = model.Registry.models[student_name](first_call=False)
+  ckpt = tf.train.Checkpoint(student_generator=student_generator)
+  logging.info("Initiating Variables. Tracing Function.")
+  student_generator.predict(tf.random.normal([1, 180, 320, 3]))
+  if stats.get(mode):
+    status = utils.load_checkpoint(
+        ckpt,
+        "%s_checkpoint" % mode,
+        basepath=modeldir,
+        use_student_settings=True)
+  if not status:
+    raise IOError("No checkpoint found to restore")
+  saved_model_dir = os.path.join(modeldir, "compressed_esrgan")
+
+  if not tf.io.gfile.exists(
+          os.path.join(saved_model_dir, "saved_model.pb")):
+    tf.saved_model.save(
+        student_generator,
+        saved_model_dir)
+
+  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
+  converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
+  # TODO (@captain-pool): Try to fix Quantization
+  # Current Error: Cannot Quantize LEAKY_RELU and CONV2D_TRANSPOSE
+  # Quantization Code Fragment
+  # converter.target_spec.supported_types = [tf.float16]
+  # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+  # converter.representative_dataset = representative_dataset_gen(
+  #      kwargs["calibration_steps"],
+  #      kwargs["datadir"],
+  #      sett["hr_size"],
+  #      [180, 320, 3])
+
+  tflite_model = converter.convert()
+  tflite_path = os.path.join(modeldir, "tflite", "compressed_esrgan.tflite")
+
+  with tf.io.gfile.GFile(tflite_path, "wb") as f:
+    f.write(tflite_model)
+  logging.info("Successfully writen the TFLite to: %s" % tflite_path)
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--modeldir",
+      default="",
+      help="Directory of the saved checkpoints")
+  parser.add_argument(
+      "--datadir",
+      default=None,
+      help="Path to TFRecords dataset")
+  parser.add_argument(
+      "--calibration_steps",
+      default=1000,
+      type=int,
+      help="Number of Steps to calibrate on")
+  parser.add_argument(
+      "--config",
+      default="config/config.yaml",
+      help="Configuration File to be loaded")
+  parser.add_argument(
+      "--mode",
+      default="none",
+      help="mode of training to load (adversarial /  comparative)")
+  parser.add_argument(
+      "--verbose",
+      "-v",
+      default=0,
+      action="count",
+      help="Increases Verbosity. Repeat to increase more")
+  log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
+  FLAGS, unknown = parser.parse_known_args()
+  log_level = log_levels[min(FLAGS.verbose, len(log_levels) - 1)]
+  logging.set_verbosity(log_level)
+  export_tflite(**vars(FLAGS))
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/dataset.py b/modules/GSOC/E3_Distill_ESRGAN/libs/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..f580441c27adb802f8533be5bd30048375ce85f7
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/dataset.py
@@ -0,0 +1,104 @@
+from absl import logging
+import os
+from lib import dataset
+from libs import settings
+import tensorflow as tf
+
+
+def generate_tf_record(
+        data_dir,
+        raw_data=False,
+        tfrecord_path="serialized_dataset",
+        num_shards=8):
+
+  teacher_sett = settings.Settings(use_student_settings=False)
+  student_sett = settings.Settings(use_student_settings=True)
+  dataset_args = teacher_sett["dataset"]
+  if dataset_args["name"].lower().strip() == "div2k":
+    assert len(data_dir) == 2
+    ds = dataset.load_div2k_dataset(
+        data_dir[0],
+        data_dir[1],
+        student_sett["hr_size"],
+        shuffle=True)
+  elif raw_data:
+    ds = dataset.load_dataset_directory(
+        dataset_args["name"],
+        data_dir,
+        dataset.scale_down(
+            method=dataset_args["scale_method"],
+            size=student_sett["hr_size"]))
+  else:
+    ds = dataset.load_dataset(
+        dataset_args["name"],
+        dataset.scale_down(
+            method=dataset_args["scale_method"],
+            size=student_sett["hr_size"]),
+        data_dir=data_dir)
+  to_tfrecord(ds, tfrecord_path, num_shards)
+
+
+def load_dataset(tfrecord_path, lr_size, hr_size):
+  def _parse_tf_record(serialized_example):
+    features = {
+        "low_res_image": tf.io.FixedLenFeature([], dtype=tf.string),
+        "high_res_image": tf.io.FixedLenFeature([], dtype=tf.string)}
+    example = tf.io.parse_single_example(serialized_example, features)
+    lr_image = tf.io.parse_tensor(
+        example["low_res_image"],
+        out_type=tf.float32)
+    lr_image = tf.reshape(lr_image, lr_size)
+    hr_image = tf.io.parse_tensor(
+        example["high_res_image"],
+        out_type=tf.float32)
+    hr_image = tf.reshape(hr_image, hr_size)
+    return lr_image, hr_image
+  files = tf.io.gfile.glob(
+      os.path.join(tfrecord_path, "*.tfrecord"))
+  if len(files) == 0:
+    raise ValueError("Path Doesn't contain any file")
+  ds = tf.data.TFRecordDataset(files).map(_parse_tf_record)
+  if len(files) == 1:
+    option = tf.data.Options()
+    option.experimental_distribute.auto_shard = False  # single shard: no auto-sharding
+    ds = ds.with_options(option)
+  ds = ds.shuffle(128, reshuffle_each_iteration=True)
+  return ds
+
+
+def to_tfrecord(ds, tfrecord_path, num_shards=8):
+  def _bytes_feature(value):
+    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+  def serialize_to_string(image_lr, image_hr):
+    features = {
+        "low_res_image": _bytes_feature(
+            tf.io.serialize_tensor(image_lr).numpy()),
+        "high_res_image": _bytes_feature(
+            tf.io.serialize_tensor(image_hr).numpy())}
+    example_proto = tf.train.Example(
+        features=tf.train.Features(feature=features))
+    return example_proto.SerializeToString()
+
+  def write_to_tfrecord(shard_id, ds):
+    filename = tf.strings.join(
+        [tfrecord_path, "/dataset.", tf.strings.as_string(shard_id),
+         ".tfrecord"])
+    writer = tf.data.experimental.TFRecordWriter(filename)
+    writer.write(ds.map(lambda _, x: x))
+    return tf.data.Dataset.from_tensors(filename)
+
+  def map_serialize_to_string(image_lr, image_hr):
+    map_fn = tf.py_function(
+        serialize_to_string,
+        (image_lr, image_hr),
+        tf.string)
+    return tf.reshape(map_fn, ())
+  ds = ds.map(map_serialize_to_string)
+  ds = ds.enumerate()
+  ds = ds.apply(tf.data.experimental.group_by_window(
+      lambda i, _: i % num_shards,
+      write_to_tfrecord,
+      tf.int64.max))
+  for data in ds:
+    logging.info("Written to: %s" % data.numpy())
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/lazy_loader.py b/modules/GSOC/E3_Distill_ESRGAN/libs/lazy_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a5cab4bd7ce53fafddbf4b7f870e2796164ea5
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/lazy_loader.py
@@ -0,0 +1,33 @@
+import importlib
+from .settings import singleton
+
+
+@singleton
+class LazyLoader:
+  def __init__(self):
+    self.__import_dict = {}
+
+  @property
+  def import_dict(self):
+    return self.__import_dict
+
+  def __setitem__(self, key, value):
+    self.__import_dict[key] = value
+
+  def import_(self, name, alias=None, parent=None, return_=True):
+    if alias is None:
+      alias = name
+    if self.__import_dict.get(alias, None) is not None and return_:
+      return self.__import_dict[alias]
+    if parent is not None:
+      exec("from {} import {} as {}".format(parent, name, "_" + alias))
+      self.__import_dict[alias] = locals()["_" + alias]
+    else:
+      module = importlib.import_module(name)
+      self.__import_dict[alias] = module
+    if return_:
+      return self.__import_dict[alias]
+
+  def __getitem__(self, key):
+    return self.__import_dict[key]
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/model.py b/modules/GSOC/E3_Distill_ESRGAN/libs/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c59a312bb08b55725ce2ae4c1d39ba281f85e0f
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/model.py
@@ -0,0 +1,3 @@
+""" Module to auto-register students and expose student registry """
+from libs import models
+from libs.models.abstract import Registry
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/__init__.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dbcbfa8325c12ed6c2a7c263132f35f2bfb4818
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/__init__.py
@@ -0,0 +1,3 @@
+from libs.models.student_vgg import VGGStudent
+from libs.models.student_rrdb import RRDBStudent
+from libs.models.student_residual import ResidualStudent
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/abstract.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/abstract.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ee284814ebd30c2c276127b95299749a69397e8
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/abstract.py
@@ -0,0 +1,25 @@
+import re
+from tensorflow.python import keras
+
+
+class Registry(type):
+  models = {}
+
+  def _convert_to_snake(name):
+    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
+
+  def __init__(cls, name, bases, attrs):
+    if Registry._convert_to_snake(name) != "model":
+      Registry.models[Registry._convert_to_snake(cls.__name__)] = cls
+
+# Abstract class for auto-registration of student models
+
+
+class Model(keras.models.Model, metaclass=Registry):
+  def __init__(self, *args, **kwargs):
+    super(Model, self).__init__()
+    self.init(*args, **kwargs)
+
+  def init(self, *args, **kwargs):
+    pass
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_residual.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_residual.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c35b8aeb3c91a01bb2ccb6799569c5b3082a4f8
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_residual.py
@@ -0,0 +1,61 @@
+""" Module containing ResNet architecture as a student Network """
+import tensorflow as tf
+from libs.models import abstract
+from libs import settings
+from functools import partial
+
+
+class ResidualStudent(abstract.Model):
+  """ ResNet Student Network Model """
+
+  def init(self):
+    self.student_settings = settings.Settings(use_student_settings=True)
+    self._scale_factor = self.student_settings["scale_factor"]
+    self._scale_value = self.student_settings["scale_value"]
+    model_args = self.student_settings["student_config"]["residual_student"]
+    depth = model_args["trunk_depth"]
+    depthwise_convolution = partial(
+        tf.keras.layers.DepthwiseConv2D,
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        padding="same",
+        use_bias=model_args["use_bias"])
+    convolution = partial(
+        tf.keras.layers.Conv2D,
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        padding="same",
+        use_bias=model_args["use_bias"])
+    conv_transpose = partial(
+        tf.keras.layers.Conv2DTranspose,
+        kernel_size=[3, 3],
+        strides=self._scale_value,
+        padding="same")
+    self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+    self._residual_scale = model_args["residual_scale_beta"]
+    self._conv_layers = {
+        "conv_%d" % index:
+        depthwise_convolution() for index in range(1, depth + 1)}
+    self._upscale_layers = {
+        "upscale_%d" % index:
+        conv_transpose(filters=32) for index in range(1, self._scale_factor)}
+    self._last_layer = conv_transpose(filters=3)
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(
+              shape=[None, 180, 270, 3],  # 720x1080 Images
+              dtype=tf.float32)])
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, inputs):
+    intermediate = inputs
+    for layer_name in self._conv_layers:
+      intermediate += self._lrelu(self._conv_layers[layer_name](intermediate))
+    intermediate = inputs + self._residual_scale * intermediate  # Residual Trunk
+    # Upsampling
+    for layer_name in self._upscale_layers:
+      intermediate = self._lrelu(
+          self._upscale_layers[layer_name](intermediate))
+    return self._last_layer(intermediate)
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_rrdb.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_rrdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..265e3ae4c79247f308234f430183ec4fe57f5932
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_rrdb.py
@@ -0,0 +1,124 @@
+""" Module contains Residual in Residual student network and helper layers. """
+
+from functools import partial
+from libs.models import abstract
+from libs import settings
+import tensorflow as tf
+from absl import logging
+
+
+class ResidualDenseBlock(tf.keras.layers.Layer):
+  """
+    Keras implementation of Residual Dense Block.
+    (https://arxiv.org/pdf/1809.00219.pdf)
+  """
+
+  def __init__(self, first_call=True):
+    super(ResidualDenseBlock, self).__init__()
+    self.settings = settings.Settings(use_student_settings=True)
+    rdb_config = self.settings["student_config"]["rrdb_student"]["rdb_config"]
+    convolution = partial(
+        tf.keras.layers.DepthwiseConv2D,
+        depthwise_initializer="he_normal",
+        bias_initializer="he_normal",
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        padding="same")
+    self._first_call = first_call
+    self._conv_layers = {
+        "conv_%d" % index: convolution()
+        for index in range(1, rdb_config["depth"])}
+    self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+    self._beta = rdb_config["residual_scale_beta"]
+
+  def call(self, inputs):
+    intermediates = [inputs]
+    for layer_name in self._conv_layers:
+      residual_junction = tf.math.add_n(intermediates)
+      raw_intermediate = self._conv_layers[layer_name](residual_junction)
+      activated_intermediate = self._lrelu(raw_intermediate)
+      intermediates.append(activated_intermediate)
+    if self._first_call:
+      logging.debug("Initializing Layers with 0.1 x MSRA")
+      for _, layer in self._conv_layers.items():
+        for weight in layer.trainable_variables:
+          weight.assign(0.1 * weight)
+      self._first_call = False
+    return inputs + self._beta * raw_intermediate
+
+
+class ResidualInResidualBlock(tf.keras.layers.Layer):
+  """
+    Keras implementation of Residual in Residual Block.
+    (https://arxiv.org/pdf/1809.00219.pdf)
+  """
+
+  def __init__(self, first_call=True):
+    super(ResidualInResidualBlock, self).__init__()
+    self.settings = settings.Settings(use_student_settings=True)
+    rrdb_config = self.settings["student_config"]["rrdb_student"]["rrdb_config"]
+    self._rdb_layers = {
+        "rdb_%d" % index: ResidualDenseBlock(first_call=first_call)
+        for index in range(1, rrdb_config["rdb_units"])}
+    self._beta = rrdb_config["residual_scale_beta"]
+
+  def call(self, inputs):
+    intermediate = inputs
+    for layer_name in self._rdb_layers:
+      intermediate = self._rdb_layers[layer_name](intermediate)
+    return inputs + self._beta * intermediate
+
+
+class RRDBStudent(abstract.Model):
+  """
+    Keras implementation of Residual in Residual Student Network.
+    (https://arxiv.org/pdf/1809.00219.pdf)
+  """
+
+  def init(self, first_call=True):
+    self.settings = settings.Settings(use_student_settings=True)
+    self._scale_factor = self.settings["scale_factor"]
+    self._scale_value = self.settings["scale_value"]
+    rrdb_student_config = self.settings["student_config"]["rrdb_student"]
+    rrdb_block = partial(ResidualInResidualBlock, first_call=first_call)
+    growth_channels = rrdb_student_config["growth_channels"]
+    depthwise_conv = partial(
+        tf.keras.layers.DepthwiseConv2D,
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        use_bias=True,
+        padding="same")
+    convolution = partial(
+        tf.keras.layers.Conv2D,
+        kernel_size=[3, 3],
+        use_bias=True,
+        strides=[1, 1],
+        padding="same")
+    conv_transpose = partial(
+        tf.keras.layers.Conv2DTranspose,
+        kernel_size=[3, 3],
+        use_bias=True,
+        strides=self._scale_value,
+        padding="same")
+    self._rrdb_trunk = tf.keras.Sequential(
+        [rrdb_block() for _ in range(rrdb_student_config["trunk_size"])])
+    self._first_conv = convolution(filters=64)
+    self._upsample_layers = {
+        "upsample_%d" % index: conv_transpose(filters=growth_channels)
+        for index in range(1, self._scale_factor)}
+    key = "upsample_%d" % self._scale_factor
+    self._upsample_layers[key] = conv_transpose(filters=3)
+    self._conv_last = depthwise_conv()
+    self._lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
+
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, inputs):
+    residual_start = self._first_conv(inputs)
+    intermediate = residual_start + self._rrdb_trunk(residual_start)
+    for layer_name in self._upsample_layers:
+      intermediate = self._lrelu(
+          self._upsample_layers[layer_name](intermediate))
+    out = self._conv_last(intermediate)
+    return out
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_vgg.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7a3fac5398653a243393b413117f9790259237b
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/student_vgg.py
@@ -0,0 +1,57 @@
+""" Student Network with VGG Architecture """
+import tensorflow as tf
+from libs import settings
+from libs.models import abstract
+from functools import partial
+
+
+class VGGStudent(abstract.Model):
+  def init(self):
+    sett = settings.Settings(use_student_settings=True)
+    model_args = sett["student_config"]["vgg_student"]
+    self._scale_factor = sett["scale_factor"]
+    self._scale_value = sett["scale_value"]
+    depth = model_args["trunk_depth"]  # Minimum 2 for scale factor of 4
+    depthwise_convolution = partial(
+        tf.keras.layers.DepthwiseConv2D,
+        kernel_size=[3, 3],
+        padding="same",
+        use_bias=model_args["use_bias"])
+    conv_transpose = partial(
+        tf.keras.layers.Conv2DTranspose,
+        kernel_size=[3, 3],
+        strides=self._scale_value,
+        padding="same")
+    convolution = partial(
+        tf.keras.layers.Conv2D,
+        kernel_size=[3, 3],
+        strides=[1, 1],
+        use_bias=model_args["use_bias"],
+        padding="same")
+    trunk_depth = depth - self._scale_factor
+    self._conv_layers = {
+        "conv_%d" % index: depthwise_convolution()
+        for index in range(1, trunk_depth + 1)}
+
+    self._upsample_layers = {
+        "upsample_%d" % index: conv_transpose(filters=32)
+        for index in range(1, self._scale_factor)}
+    self._last_layer = conv_transpose(filters=3)
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(
+              shape=[None, 180, 270, 3],  # For 720x1080 images
+              dtype=tf.float32)])
+  def call(self, inputs):
+    return self.unsigned_call(inputs)
+
+  def unsigned_call(self, inputs):
+    intermediate = inputs
+    for layer_name in self._conv_layers:
+      intermediate = self._conv_layers[layer_name](intermediate)
+      intermediate = tf.keras.layers.LeakyReLU(alpha=0.2)(intermediate)
+    for layer_name in self._upsample_layers:
+      intermediate = self._upsample_layers[layer_name](intermediate)
+      intermediate = tf.keras.layers.LeakyReLU(alpha=0.2)(intermediate)
+    return self._last_layer(intermediate)
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/models/teacher.py b/modules/GSOC/E3_Distill_ESRGAN/libs/models/teacher.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2b94a753d248dc9e105dc8380fb1339b932cfa
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/models/teacher.py
@@ -0,0 +1,8 @@
+""" Module to load Teacher Models from Teacher Directory """
+import os
+import sys
+from libs import teacher_imports
+from lib import settings
+settings.Settings("%s/config/config.yaml" % teacher_imports.TEACHER_DIR)
+from lib.model import VGGArch as discriminator
+from lib.model import RRDBNet as generator
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/settings.py b/modules/GSOC/E3_Distill_ESRGAN/libs/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ef1b11cfc6e2cd44883270f8227cb85690e9650
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/settings.py
@@ -0,0 +1,71 @@
+""" Module containing settings and training statistics file handler."""
+import os
+import yaml
+
+
+def singleton(cls):
+  instances = {}
+
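+  # One cached instance per key, so teacher settings and student settings
+  # coexist as separate singletons.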
+  def getinstance(*args, **kwargs):
+    distill_config = kwargs.get("use_student_settings", "")
+    key = cls.__name__
+    if distill_config:
+      key = "%s_student" % (cls.__name__)
+    if key not in instances:
+      instances[key] = cls(*args, **kwargs)
+    return instances[key]
+  return getinstance
+
+
+@singleton
+class Settings(object):
+  """ Settings class to handle yaml config files """
+
+  def __init__(
+      self,
+      filename="config/config.yaml",
+      use_student_settings=False):
+
+    self.__path = os.path.abspath(filename)
+
+  @property
+  def path(self):
+    return os.path.dirname(self.__path)
+
+  def __getitem__(self, index):
+    with open(self.__path, "r") as file_:
+      return yaml.load(file_.read(), Loader=yaml.FullLoader)[index]
+
+  def get(self, index, default=None):
+    with open(self.__path, "r") as file_:
+      return yaml.load(
+          file_.read(),
+          Loader=yaml.FullLoader).get(
+          index,
+          default)
+
+
+class Stats(object):
+  """ Class to handle Training Statistics File """
+
+  def __init__(self, filename="stats.yaml"):
+    if os.path.exists(filename):
+      with open(filename, "r") as file_:
+        self.__data = yaml.load(file_.read(), Loader=yaml.FullLoader)
+    else:
+      self.__data = {}
+    self.file = filename
+
+  def get(self, index, default=None):
+    return self.__data.get(index, default)
+
+  def __getitem__(self, index):
+    return self.__data[index]
+
+  def __repr__(self):
+    return "Stats(" + str(self.__data) + ")"
+
+  def __setitem__(self, index, data):
+    self.__data[index] = data
+    with open(self.file, "w") as file_:
+      yaml.dump(self.__data, file_, default_flow_style=False)
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/teacher_imports.py b/modules/GSOC/E3_Distill_ESRGAN/libs/teacher_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cb7348db21cbcaa8daacb156e18e91e643d851e
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/teacher_imports.py
@@ -0,0 +1,8 @@
+import sys
+import os
+from libs import settings
+_teacher_directory = settings.Settings(use_student_settings=True)[
+    "teacher_directory"]
+TEACHER_DIR = os.path.abspath(_teacher_directory)
+if _teacher_directory not in sys.path:
+  sys.path.insert(0, TEACHER_DIR)
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/train.py b/modules/GSOC/E3_Distill_ESRGAN/libs/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0cefb8dcd29bc96e215e5ebf611946be1fefbc8
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/train.py
@@ -0,0 +1,360 @@
+""" Trainer class to train student network to compress ESRGAN """
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import logging
+from libs import dataset
+from libs import settings
+from libs import utils
+import tensorflow as tf
+
+
+class Trainer(object):
+  """Trainer Class for Knowledge Distillation of ESRGAN"""
+
+  def __init__(
+          self,
+          teacher,
+          discriminator,
+          summary_writer,
+          summary_writer_2=None,
+          model_dir="",
+          data_dir="",
+          strategy=None):
+    """
+      Args:
+        teacher: Keras Model of pre-trained teacher generator.
+                 (Generator of ESRGAN)
+        discriminator: Keras Model of pre-trained teacher discriminator.
+                       (Discriminator of ESRGAN)
+        summary_writer: tf.summary.SummaryWriter object for writing
+                         summary for Tensorboard.
+        data_dir: Location of the stored dataset.
+        raw_data: Indicate if data_dir contains Raw Data or TFRecords.
+        model_dir: Location to store checkpoints and SavedModel directory.
+    """
+    self.teacher_generator = teacher
+    self.teacher_discriminator = discriminator
+    self.teacher_settings = settings.Settings(use_student_settings=False)
+    self.student_settings = settings.Settings(use_student_settings=True)
+    self.model_dir = model_dir
+    self.strategy = strategy
+    self.train_args = self.student_settings["train"]
+    self.batch_size = self.teacher_settings["batch_size"]
+    self.hr_size = self.student_settings["hr_size"]
+    # Derive the LR shape from the HR shape: scale the spatial dimensions
+    # down 4x while keeping the channel count intact, i.e.
+    # [H, W, C] -> [H, W, 4 * C] -> (// 4) -> [H / 4, W / 4, C]
+    self.lr_size = tf.unstack(self.hr_size)[:-1]
+    self.lr_size.append(tf.gather(self.hr_size, len(self.hr_size) - 1) * 4)
+    self.lr_size = tf.stack(self.lr_size) // 4
+    self.summary_writer = summary_writer
+    self.summary_writer_2 = summary_writer_2
+    # Loading TFRecord Dataset
+    self.dataset = dataset.load_dataset(
+        data_dir,
+        lr_size=self.lr_size,
+        hr_size=self.hr_size)
+    self.dataset = (
+        self.dataset.repeat()
+        .batch(self.batch_size, drop_remainder=True)
+        .prefetch(1024))
+    self.dataset = iter(self.strategy.experimental_distribute_dataset(
+        self.dataset))
+    # Reloading Checkpoint from Phase 2 Training of ESRGAN
+    checkpoint = tf.train.Checkpoint(
+      G=self.teacher_generator,
+      D=self.teacher_discriminator)
+    utils.load_checkpoint(
+        checkpoint,
+        "phase_2",
+        basepath=model_dir,
+        use_student_settings=False)
+
+  def train_comparative(self, student, export_only=False):
+    """
+      Trains the student using a comparative loss function (Mean Squared Error)
+      based on the output of the teacher.
+      Args:
+        student: Keras model of the student.
+        export_only: True to only restore the checkpoint, without training.
+    """
+    train_args = self.train_args["comparative"]
+    total_steps = train_args["num_steps"]
+    decay_rate = train_args["decay_rate"]
+    decay_steps = train_args["decay_steps"]
+    optimizer = tf.optimizers.Adam()
+    checkpoint = tf.train.Checkpoint(
+        student_generator=student,
+        student_optimizer=optimizer)
+    status = utils.load_checkpoint(
+        checkpoint,
+        "comparative_checkpoint",
+        basepath=self.model_dir,
+        use_student_settings=True)
+    if export_only:
+      return
+    loss_fn = tf.keras.losses.MeanSquaredError(reduction="none")
+    metric_fn = tf.keras.metrics.Mean()
+    student_psnr = tf.keras.metrics.Mean()
+    teacher_psnr = tf.keras.metrics.Mean()
+
+    def step_fn(image_lr, image_hr):
+      """
+        Function to be replicated among the worker nodes
+        Args:
+          image_lr: Distributed Batch of Low Resolution Images
+          image_hr: Distributed Batch of High Resolution Images
+      """
+      with tf.GradientTape() as tape:
+        teacher_fake = self.teacher_generator.unsigned_call(image_lr)
+        student_fake = student.unsigned_call(image_lr)
+        student_fake = tf.clip_by_value(student_fake, 0, 255)
+        teacher_fake = tf.clip_by_value(teacher_fake, 0, 255)
+        image_hr = tf.clip_by_value(image_hr, 0, 255)
+        student_psnr(tf.reduce_mean(
+            tf.image.psnr(student_fake, image_hr, max_val=255.0)))
+        teacher_psnr(tf.reduce_mean(
+            tf.image.psnr(teacher_fake, image_hr, max_val=255.0)))
+        loss = utils.pixelwise_mse(teacher_fake, student_fake)
+        loss = tf.reduce_mean(loss) * (1.0 / self.batch_size)
+        metric_fn(loss)
+      student_vars = list(set(student.trainable_variables))
+      gradient = tape.gradient(loss, student_vars)
+      train_op = optimizer.apply_gradients(
+          zip(gradient, student_vars))
+      with tf.control_dependencies([train_op]):
+        return tf.cast(optimizer.iterations, tf.float32)
+
+    @tf.function
+    def train_step(image_lr, image_hr):
+      """
+        In Graph Function to assign trainer function to
+        replicate among worker nodes.
+        Args:
+          image_lr: Distributed batch of Low Resolution Images
+          image_hr: Distributed batch of High Resolution Images
+      """
+      distributed_metric = self.strategy.experimental_run_v2(
+          step_fn, args=(image_lr, image_hr))
+      mean_metric = self.strategy.reduce(
+          tf.distribute.ReduceOp.MEAN, distributed_metric, axis=None)
+      return mean_metric
+    logging.info("Starting comparative loss training")
+    while True:
+      image_lr, image_hr = next(self.dataset)
+      step = train_step(image_lr, image_hr)
+      if step >= total_steps:
+        return
+      for _step in decay_steps.copy():
+        if step >= _step:
+          decay_steps.pop(0)
+          logging.debug("Reducing Learning Rate by: %f" % decay_rate)
+          optimizer.learning_rate.assign(optimizer.learning_rate * decay_rate)
+      if status:
+        status.assert_consumed()
+        logging.info("Checkpoint loaded successfully")
+        status = None
+      # Writing Summary
+      with self.summary_writer.as_default():
+        tf.summary.scalar(
+            "student_loss", metric_fn.result(), step=optimizer.iterations)
+        tf.summary.scalar(
+            "mean_psnr", student_psnr.result(), step=optimizer.iterations)
+      if self.summary_writer_2:
+        with self.summary_writer_2.as_default():
+          tf.summary.scalar(
+              "mean_psnr", teacher_psnr.result(), step=optimizer.iterations)
+
+      if not step % train_args["print_step"]:
+        logging.info("[COMPARATIVE LOSS] Step: %s\tLoss: %s" %
+                     (step, metric_fn.result()))
+      # Saving Checkpoint
+      if not step % train_args["checkpoint_step"]:
+        utils.save_checkpoint(
+            checkpoint,
+            "comparative_checkpoint",
+            basepath=self.model_dir,
+            use_student_settings=True)
+
+  def train_adversarial(self, student, export_only=False):
+    """
+      Trains the student adversarially, using a joint loss: the relativistic
+      average loss from the teacher's discriminator combined with the mean
+      squared error between the outputs of the student-teacher generator pair.
+      Args:
+        student: Keras model of the student to train.
+        export_only: True to only restore the checkpoint, without training.
+    """
+    train_args = self.train_args["adversarial"]
+    total_steps = train_args["num_steps"]
+    decay_steps = train_args["decay_steps"]
+    decay_rate = train_args["decay_rate"]
+
+    lambda_ = train_args["lambda"]
+    alpha = train_args["alpha"]
+
+    generator_metric = tf.keras.metrics.Mean()
+    discriminator_metric = tf.keras.metrics.Mean()
+    generator_optimizer = tf.optimizers.Adam(
+        learning_rate=train_args["initial_lr"])
+    dummy_optimizer = tf.optimizers.Adam()
+    discriminator_optimizer = tf.optimizers.Adam(
+        learning_rate=train_args["initial_lr"])
+    checkpoint = tf.train.Checkpoint(
+        student_generator=student,
+        student_optimizer=generator_optimizer,
+        teacher_optimizer=discriminator_optimizer,
+        teacher_generator=self.teacher_generator,
+        teacher_discriminator=self.teacher_discriminator)
+
+    status = None
+    if not utils.checkpoint_exists(
+        names="adversarial_checkpoint",
+        basepath=self.model_dir,
+        use_student_settings=True):
+      if export_only:
+        raise ValueError("Adversarial checkpoints not found")
+    else:
+      status = utils.load_checkpoint(
+          checkpoint,
+          "adversarial_checkpoint",
+          basepath=self.model_dir,
+          use_student_settings=True)
+    if export_only:
+      if not status:
+        raise ValueError("No Checkpoint Loaded!")
+      return
+    ra_generator = utils.RelativisticAverageLoss(
+        self.teacher_discriminator, type_="G")
+    ra_discriminator = utils.RelativisticAverageLoss(
+        self.teacher_discriminator, type_="D")
+    perceptual_loss = utils.PerceptualLoss(
+        weights="imagenet",
+        input_shape=self.hr_size,
+        loss_type="L2")
+    student_psnr = tf.keras.metrics.Mean()
+    teacher_psnr = tf.keras.metrics.Mean()
+
+    def step_fn(image_lr, image_hr):
+      """
+        Function to be replicated among the worker nodes
+        Args:
+          image_lr: Distributed Batch of Low Resolution Images
+          image_hr: Distributed Batch of High Resolution Images
+      """
+      with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
+        teacher_fake = self.teacher_generator.unsigned_call(image_lr)
+        logging.debug("Fetched Fake: Teacher")
+        teacher_fake = tf.clip_by_value(teacher_fake, 0, 255)
+        student_fake = student.unsigned_call(image_lr)
+        logging.debug("Fetched Fake: Student")
+        student_fake = tf.clip_by_value(student_fake, 0, 255)
+        image_hr = tf.clip_by_value(image_hr, 0, 255)
+        psnr = tf.image.psnr(student_fake, image_hr, max_val=255.0)
+        student_psnr(tf.reduce_mean(psnr))
+        psnr = tf.image.psnr(teacher_fake, image_hr, max_val=255.0)
+        teacher_psnr(tf.reduce_mean(psnr))
+        mse_loss = utils.pixelwise_mse(teacher_fake, student_fake)
+
+        image_lr = utils.preprocess_input(image_lr)
+        image_hr = utils.preprocess_input(image_hr)
+        student_fake = utils.preprocess_input(student_fake)
+        teacher_fake = utils.preprocess_input(teacher_fake)
+
+        student_ra_loss = ra_generator(teacher_fake, student_fake)
+        logging.debug("Relativistic Average Loss: Student")
+        discriminator_loss = ra_discriminator(teacher_fake, student_fake)
+        discriminator_metric(discriminator_loss)
+        discriminator_loss = tf.reduce_mean(
+            discriminator_loss) * (1.0 / self.batch_size)
+        logging.debug("Relativistic Average Loss: Teacher")
+        percep_loss = perceptual_loss(image_hr, student_fake)
+        generator_loss = (lambda_ * percep_loss
+                          + alpha * student_ra_loss
+                          + (1 - alpha) * mse_loss)
+        generator_metric(generator_loss)
+        logging.debug("Calculated Joint Loss for Generator")
+        generator_loss = tf.reduce_mean(
+            generator_loss) * (1.0 / self.batch_size)
+      generator_gradient = gen_tape.gradient(
+          generator_loss, student.trainable_variables)
+      logging.debug("calculated gradient: generator")
+      discriminator_gradient = disc_tape.gradient(
+          discriminator_loss, self.teacher_discriminator.trainable_variables)
+      logging.debug("calculated gradient: discriminator")
+      generator_op = generator_optimizer.apply_gradients(
+          zip(generator_gradient, student.trainable_variables))
+      logging.debug("applied generator gradients")
+      discriminator_op = discriminator_optimizer.apply_gradients(
+          zip(discriminator_gradient, self.teacher_discriminator.trainable_variables))
+      logging.debug("applied discriminator gradients")
+
+      with tf.control_dependencies(
+              [generator_op, discriminator_op]):
+        return tf.cast(discriminator_optimizer.iterations, tf.float32)
+
+    @tf.function
+    def train_step(image_lr, image_hr):
+      """
+        In Graph Function to assign trainer function to
+        replicate among worker nodes.
+        Args:
+          image_lr: Distributed batch of Low Resolution Images
+          image_hr: Distributed batch of High Resolution Images
+      """
+      distributed_metric = self.strategy.experimental_run_v2(
+          step_fn,
+          args=(image_lr, image_hr))
+      mean_metric = self.strategy.reduce(
+          tf.distribute.ReduceOp.MEAN,
+          distributed_metric, axis=None)
+      return mean_metric
+
+    logging.info("Starting Adversarial Training")
+
+    while True:
+      image_lr, image_hr = next(self.dataset)
+      step = train_step(image_lr, image_hr)
+      if status:
+        status.assert_consumed()
+        logging.info("Loaded Checkpoint successfully!")
+        status = None
+      if not isinstance(decay_steps, list):
+        if not step % decay_steps:
+          logging.debug("Decaying Learning Rate by: %s" % decay_rate)
+          generator_optimizer.learning_rate.assign(
+              generator_optimizer.learning_rate * decay_rate)
+          discriminator_optimizer.learning_rate.assign(
+              discriminator_optimizer.learning_rate * decay_rate)
+      else:
+        for decay_step in decay_steps.copy():
+          if decay_step <= step:
+            decay_steps.pop(0)
+            logging.debug("Decaying Learning Rate by: %s" % decay_rate)
+            generator_optimizer.learning_rate.assign(
+                generator_optimizer.learning_rate * decay_rate)
+            discriminator_optimizer.learning_rate.assign(
+                discriminator_optimizer.learning_rate * decay_rate)
+      # Setting Up Logging
+      with self.summary_writer.as_default():
+        tf.summary.scalar(
+            "student_loss",
+            generator_metric.result(),
+            step=discriminator_optimizer.iterations)
+        tf.summary.scalar(
+            "teacher_discriminator_loss",
+            discriminator_metric.result(),
+            step=discriminator_optimizer.iterations)
+        tf.summary.scalar(
+            "mean_psnr",
+            student_psnr.result(),
+            step=discriminator_optimizer.iterations)
+      if self.summary_writer_2:
+        with self.summary_writer_2.as_default():
+          tf.summary.scalar(
+              "mean_psnr",
+              teacher_psnr.result(),
+              step=discriminator_optimizer.iterations)
+      if not step % train_args["print_step"]:
+        logging.info(
+            "[ADVERSARIAL] Step: %s\tStudent Loss: %s\t"
+            "Discriminator Loss: %s" %
+            (step, generator_metric.result(),
+             discriminator_metric.result()))
+      # Setting Up Checkpoint
+      if not step % train_args["checkpoint_step"]:
+        utils.save_checkpoint(
+            checkpoint,
+            "adversarial_checkpoint",
+            basepath=self.model_dir,
+            use_student_settings=True)
+      if step >= total_steps:
+        return
diff --git a/modules/GSOC/E3_Distill_ESRGAN/libs/utils.py b/modules/GSOC/E3_Distill_ESRGAN/libs/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..edddac0e538a6eccca30543e97cfa1b5b97912a1
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/libs/utils.py
@@ -0,0 +1,65 @@
+""" Module containing utility functions for the trainer """
+import os
+
+from absl import logging
+from libs import settings
+import tensorflow as tf
+# Loading utilities from ESRGAN
+from lib.utils import assign_to_worker
+from lib.utils import PerceptualLoss
+from lib.utils import RelativisticAverageLoss
+from lib.utils import SingleDeviceStrategy
+from lib.utils import preprocess_input
+
+def checkpoint_exists(names, basepath="", use_student_settings=False):
+  """ Checks if a checkpoint exists for any of the given names.
+      Args:
+        names: name (or list of names) of the checkpoint(s) to look for.
+        basepath: base directory where the checkpoints should be located.
+        use_student_settings: boolean to indicate if settings of the
+                              student should be used.
+  """
+  sett = settings.Settings(use_student_settings=use_student_settings)
+  if tf.nest.is_nested(names):
+    if isinstance(names, dict):
+      return False
+  else:
+    names = [names]
+  values = []
+  for name in names:
+    dir_ = os.path.join(basepath, sett["checkpoint_path"][name], "checkpoint")
+    values.append(tf.io.gfile.exists(dir_))
+  return any(values)
+
+
+def save_checkpoint(checkpoint, name, basepath="", use_student_settings=False):
+  """ Saves Checkpoint
+      Args:
+        checkpoint: tf.train.Checkpoint object to save.
+        name: name of the checkpoint to save.
+        basepath: base directory where checkpoint should be saved
+        use_student_settings: boolean to indicate if settings of the
+                              student should be used.
+  """
+  sett = settings.Settings(use_student_settings=use_student_settings)
+  dir_ = os.path.join(basepath, sett["checkpoint_path"][name])
+  logging.info("Saving checkpoint: %s Path: %s" % (name, dir_))
+  prefix = os.path.join(dir_, os.path.basename(dir_))
+  checkpoint.save(file_prefix=prefix)
+
+
+def load_checkpoint(checkpoint, name, basepath="", use_student_settings=False):
+  """ Restores Checkpoint
+      Args:
+        checkpoint: tf.train.Checkpoint object to restore.
+        name: name of the checkpoint to restore.
+        basepath: base directory where checkpoint is located.
+        use_student_settings: boolean to indicate if settings of the
+                              student should be used.
+  """
+  sett = settings.Settings(use_student_settings=use_student_settings)
+  dir_ = os.path.join(basepath, sett["checkpoint_path"][name])
+  if tf.io.gfile.exists(os.path.join(dir_, "checkpoint")):
+    logging.info("Found checkpoint: %s Path: %s" % (name, dir_))
+    status = checkpoint.restore(tf.train.latest_checkpoint(dir_))
+    return status
+  logging.info("No Checkpoint found for %s" % name)
+
+# Losses
+
+
+def pixelwise_mse(y_true, y_pred):
+  """ Mean squared error, averaged over the batch dimension first
+      and then over the remaining pixel dimensions. """
+  mean_squared_error = tf.reduce_mean(tf.reduce_mean(
+      (y_true - y_pred)**2, axis=0))
+  return mean_squared_error
diff --git a/modules/GSOC/E3_Distill_ESRGAN/main.py b/modules/GSOC/E3_Distill_ESRGAN/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4316c00ca9a47b299545191ab0e532f9d1f755be
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/main.py
@@ -0,0 +1,156 @@
+"""
+  Compressing GANs using Knowledge Distillation.
+  Teacher GAN: ESRGAN (https://github.com/captain-pool/E2_ESRGAN)
+
+Citation:
+  @article{DBLP:journals/corr/abs-1902-00159,
+    author    = {Angeline Aguinaldo and
+    Ping{-}Yeh Chiang and
+    Alexander Gain and
+    Ameya Patil and
+    Kolten Pearson and
+    Soheil Feizi},
+    title     = {Compressing GANs using Knowledge Distillation},
+    journal   = {CoRR},
+    volume    = {abs/1902.00159},
+    year      = {2019},
+    url       = {http://arxiv.org/abs/1902.00159},
+    archivePrefix = {arXiv},
+    eprint    = {1902.00159},
+    timestamp = {Tue, 21 May 2019 18:03:39 +0200},
+    biburl    = {https://dblp.org/rec/bib/journals/corr/abs-1902-00159},
+    bibsource = {dblp computer science bibliography, https://dblp.org}
+  }
+"""
+import os
+from absl import logging
+import argparse
+from libs import lazy_loader
+from libs import model
+from libs import settings
+import tensorflow as tf
+
+
+def train_and_export(**kwargs):
+  """ Train and Export Compressed ESRGAN
+      Args:
+        config: path to config file.
+        logdir: path to logging directory.
+        modeldir: path to store the checkpoints and exported model.
+        datadir: path to custom data directory containing sharded TFRecords.
+        tpu: name of the TPU to use, if any.
+        type: type of training, "comparative" or "adversarial".
+        export_only: if True, skip training and only export the model.
+  """
+  lazy = lazy_loader.LazyLoader()
+
+  student_settings = settings.Settings(
+      kwargs["config"], use_student_settings=True)
+
+  # Lazy importing dependencies from teacher
+  lazy.import_("teacher_imports", parent="libs", return_=False)
+  lazy.import_("teacher", parent="libs.models", return_=False)
+  lazy.import_("train", parent="libs", return_=False)
+  lazy.import_("utils", parent="libs", return_=False)
+  globals().update(lazy.import_dict)
+  tf.random.set_seed(10)
+  teacher_settings = settings.Settings(
+      student_settings["teacher_config"], use_student_settings=False)
+  stats = settings.Stats(os.path.join(student_settings.path, "stats.yaml"))
+  strategy = utils.SingleDeviceStrategy()
+
+  if kwargs["tpu"]:
+    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
+        kwargs["tpu"])
+    tf.config.experimental_connect_to_host(cluster_resolver.get_master())
+    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
+    strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
+
+  device_name = utils.assign_to_worker(kwargs["tpu"])
+
+  with tf.device(device_name), strategy.scope():
+    summary_writer = tf.summary.create_file_writer(
+        os.path.join(kwargs["logdir"], "student"))
+    teacher_summary_writer = tf.summary.create_file_writer(
+        os.path.join(kwargs["logdir"], "teacher"))
+
+    teacher_generator = teacher.generator(out_channel=3, first_call=False)
+    teacher_discriminator = teacher.discriminator(
+        batch_size=teacher_settings["batch_size"])
+
+    student_generator = (
+        model.Registry
+        .models[student_settings["student_network"]]())
+
+    hr_size = tf.cast(
+        tf.convert_to_tensor([1] + student_settings["hr_size"]), tf.float32)
+    lr_size = tf.cast(
+        hr_size * tf.convert_to_tensor([1, 1 / 4, 1 / 4, 1]), tf.int32)
+
+    logging.debug("Initializing Convolutions")
+    student_generator.unsigned_call(tf.random.normal(lr_size))
+
+    trainer = train.Trainer(
+        teacher_generator,
+        teacher_discriminator,
+        summary_writer,
+        summary_writer_2=teacher_summary_writer,
+        model_dir=kwargs["modeldir"],
+        data_dir=kwargs["datadir"],
+        strategy=strategy)
+    phase_name = None
+    if kwargs["type"].lower().startswith("comparative"):
+      trainer.train_comparative(
+          student_generator,
+          export_only=stats.get("comparative") or kwargs["export_only"])
+      if not kwargs["export_only"]:
+        stats["comparative"] = True
+    elif kwargs["type"].lower().startswith("adversarial"):
+      trainer.train_adversarial(
+          student_generator,
+          export_only=stats.get("adversarial") or kwargs["export_only"])
+      if not kwargs["export_only"]:
+        stats["adversarial"] = True
+  # Trace the graph once to fix the input signature
+  _ = student_generator.predict(
+      tf.random.normal([1, 180, 320, 3]))
+  tf.saved_model.save(
+      student_generator,
+      os.path.join(
+          kwargs["modeldir"],
+          "compressed_esrgan"))
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--tpu", default=None, help="Name of the TPU to use")
+  parser.add_argument("--logdir", default=None, help="Path to log directory")
+  parser.add_argument(
+      "--export_only",
+      default=False,
+      action="store_true",
+      help="Do not train, only export the model")
+  parser.add_argument(
+      "--config",
+      default="config/config.yaml",
+      help="path to config file")
+  parser.add_argument(
+      "--datadir",
+      default=None,
+      help="Path to custom data directory containing sharded TFRecords")
+  parser.add_argument(
+      "--modeldir",
+      default=None,
+      help="directory to store checkpoints and SavedModel")
+  parser.add_argument(
+      "--type",
+      default=None,
+      help="Train Student 'adversarial'-ly / 'comparative'-ly")
+  parser.add_argument(
+      "--verbose",
+      "-v",
+      default=0,
+      action="count",
+      help="Increases Verbosity. Repeat to increase more")
+
+  FLAGS, unparsed = parser.parse_known_args()
+  log_levels = [logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG]
+  log_level = log_levels[min(FLAGS.verbose, len(log_levels) - 1)]
+  logging.set_verbosity(log_level)
+  train_and_export(**vars(FLAGS))
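+
+# Example invocation (illustrative; see train.sh for a full TPU run):
+#   python3 main.py --logdir logdir --modeldir modeldir \
+#       --datadir datadir --type comparative -vvv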
diff --git a/modules/GSOC/E3_Distill_ESRGAN/train.sh b/modules/GSOC/E3_Distill_ESRGAN/train.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0d687d0bdcff1d813ee71b829cfb9e66bd857ea6
--- /dev/null
+++ b/modules/GSOC/E3_Distill_ESRGAN/train.sh
@@ -0,0 +1,11 @@
+#! /bin/bash
+# $BASE contains the path to the cloud bucket.
+# $TPU_NAME contains the name of the TPU to train on.
+gsutil -m rm -r $BASE/distillation/logdir/** &>/dev/null
+sudo tensorboard --logdir $BASE/distillation/logdir --port 80 &> $HOME/logdir/access.log &
+python3 main.py --logdir $BASE/distillation/logdir \
+	--datadir $BASE/datadir/coco2014_esrgan \
+	--tpu $TPU_NAME \
+	--modeldir $BASE/distillation/modeldir \
+	--type "comparative" -vvv &> $HOME/logdir/main.log &
+
diff --git a/modules/GSOC/E3_Streamer/README.md b/modules/GSOC/E3_Streamer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a09a46250a459b9223aa389f83044065957741b0
--- /dev/null
+++ b/modules/GSOC/E3_Streamer/README.md
@@ -0,0 +1,28 @@
+## Super Resolution Video Player
+With the success of distilling ESRGAN to produce high resolution images in milliseconds (0.3 seconds, to be specific), this
+application pushes the model to its limit. This proof of concept video player takes in a normal video, downscales its frames
+bicubically, and then performs super resolution on the frames on CPU, displaying them in realtime.
+
+Here's a sample.
+
+Video used: Slash's Anastasia, Live in Sydney. (https://www.youtube.com/watch?v=bC8EmPA6H6g)
+
+![sr_player](https://user-images.githubusercontent.com/13994201/63652070-cdd88480-c779-11e9-8890-8dc379755926.png)
+
+Usage
+-----------
+
+```bash
+$ python3 player.py --file /path/to/video/file \
+                    --saved_model /path/to/saved_model
+```
+
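+How the player uses the model
+-----------------------------
+A minimal sketch of how one frame is super resolved with the exported SavedModel (the `call` method and the 320x180 low resolution input mirror `player.py`; the path is a placeholder):
+
+```python
+import tensorflow as tf
+import tensorflow_hub as hub
+
+model = hub.load("/path/to/saved_model")       # student SavedModel directory
+lr_frame = tf.random.normal([1, 180, 320, 3])  # one bicubically downscaled frame
+sr_frame = model.call(lr_frame)                # 4x upscaled by the student network
+```
+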
+Issues
+-------
+Check the issue tracker with "Super Resolution Player" as issue label or
+[click here](https://github.com/captain-pool/GSOC/issues?q=is%3Aissue+is%3Aopen+label%3A%22Super+Resolution+Player%22)
+
+Further Concepts
+------------------
+Building an online video streamer that sends low resolution video and plays the super resolved video on the client side.
+Check the [experimental](experimental) folder for the code.
diff --git a/modules/GSOC/E3_Streamer/experimental/datapacket.proto b/modules/GSOC/E3_Streamer/experimental/datapacket.proto
new file mode 100644
index 0000000000000000000000000000000000000000..acff9cafbb0b6e6f6e67e1347c7d9601f0c5ef54
--- /dev/null
+++ b/modules/GSOC/E3_Streamer/experimental/datapacket.proto
@@ -0,0 +1,15 @@
+syntax = "proto2";
+
+// Protocol Buffer for Video
+// Packets
+message Metadata {
+  required int32 duration = 1;
+  required int32 video_fps = 2;
+  required int32 audio_fps = 3;
+  repeated int32 dimension = 4;
+}
+
+message FramePacket {
+  repeated bytes video_frames = 3;
+  optional bytes audio_chunk = 4;
+}
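+
+// Wire framing (as implemented in stream_server.py and stream_client.py):
+// every serialized message is followed by the sentinel b'<END>', and the
+// stream as a whole is terminated with b'<EOF>'.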
diff --git a/modules/GSOC/E3_Streamer/experimental/stream_client.py b/modules/GSOC/E3_Streamer/experimental/stream_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d3e046b38e23fb7501da3030a5271691d6771a7
--- /dev/null
+++ b/modules/GSOC/E3_Streamer/experimental/stream_client.py
@@ -0,0 +1,150 @@
+from __future__ import print_function
+import io
+import socket
+import threading
+import time
+
+from absl import logging
+import argparse
+import queue
+import numpy as np
+import datapacket_pb2
+import pyaudio as pya
+import pygame
+import tensorflow as tf
+from pygame.locals import *  # pylint: disable=wildcard-import
+pygame.init()
+
+
+class Client(object):
+  SYN = b'SYN'
+  SYNACK = b'SYN/ACK'
+  ACK = b'ACK'
+
+  def __init__(self, ip, port):
+    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    self._metadata = datapacket_pb2.Metadata()
+    self._audio_queue = queue.Queue()
+    self._video_queue = queue.Queue()
+    self._audio_thread = threading.Thread(
+        target=self.write_to_stream, args=(True,))
+    self._video_thread = threading.Thread(
+        target=self.write_to_stream, args=(False,))
+    self._running = False
+    self.tolerance = 30  # Higher Tolerance Higher Frame Rate
+    self._lock = threading.Lock()
+    pyaudio = pya.PyAudio()
+    self._audio_stream = pyaudio.open(
+        format=pya.paFloat32,
+        channels=2,
+        rate=44100,
+        output=True,
+        frames_per_buffer=1024)
+    self._socket.connect((ip, port))
+    self.fetch_metadata()
+
+  def readpacket(self, buffersize=2**32):
+    buffer_ = io.BytesIO()
+    done = False
+    eof = False
+    while not done:
+      data = self._socket.recv(buffersize)
+      if not data:
+        # Connection closed by the server; stop reading.
+        break
+      logging.debug("Reading Stream: Buffer Size: %d" % buffersize)
+      if data[-5:] == b'<EOF>':
+        logging.debug("Found EOF")
+        data = data[:-5]
+        eof = True
+        done = True
+      if data[-5:] == b'<END>':
+        logging.debug("Found end of message")
+        data = data[:-5]
+        done = True
+      buffer_.write(data)
+    buffer_.seek(0)
+    return buffer_.read(), eof
+
+  def fetch_metadata(self):
+    logging.debug("Sending SYN...")
+    self._socket.send(b'SYN')
+    logging.debug("Sent Syn. Awating Metadata")
+    data, eof = self.readpacket(8)
+    self._metadata.ParseFromString(data)
+    dimension = self._metadata.dimension
+    self.screen = pygame.display.set_mode(dimension[:-1][::-1], 0, 32)
+
+  def fetch_video(self):
+    data, eof = self.readpacket()
+    framedata = datapacket_pb2.FramePacket()
+    framedata.ParseFromString(data)
+    frames = []
+    for frame in framedata.video_frames:
+      frames.append(self.parse_frames(frame, False))
+    self._audio_queue.put(framedata.audio_chunk)
+    self._video_queue.put(frames)
+    return eof
+
+  def parse_frames(self, bytestring, superresolve=False):
+    # The server serializes frames as raw uint8 bytes (numpy tostring()),
+    # so rebuild the array and restore its shape from the stream metadata.
+    frame = np.frombuffer(bytestring, dtype=np.uint8).reshape(
+        list(self._metadata.dimension))
+    if superresolve:
+      # Super resolution of the frame would go here (not wired up in this
+      # experimental client); see player.py for a SavedModel-based version.
+      pass
+    frame = tf.cast(tf.clip_by_value(frame, 0, 255), tf.float32)
+    return frame.numpy()
+
+  def start(self):
+    with self._lock:
+      self._running = True
+    if not self._audio_thread.is_alive():
+      self._audio_thread.start()
+    if not self._video_thread.is_alive():
+      self._video_thread.start()
+    self._socket.send(b'ACK')
+    while not self.fetch_video():
+      pass  # Wait till the end
+    self.wait_to_end()
+
+  def wait_to_end(self):
+    self._audio_thread.join()
+    self._video_thread.join()
+
+  def stop(self):
+    with self._lock:
+      self._running = False
+
+  def write_to_stream(self, isaudio=False):
+    while self._running:
+      try:
+        if isaudio:
+          if self._audio_queue.qsize() < 5:
+            continue
+          audio_chunk = self._audio_queue.get(timeout=10)
+          self._audio_stream.write(audio_chunk)
+        else:
+          if self._video_queue.qsize() < 5:
+            continue
+          for video_frame in self._video_queue.get(timeout=10):
+            video_frame = pygame.surfarray.make_surface(
+                np.rot90(np.fliplr(video_frame)))
+            self.screen.fill((0, 0, 2))
+            self.screen.blit(video_frame, (0, 0))
+            pygame.display.update()
+            time.sleep(
+                (1000 / self._metadata.video_fps - self.tolerance) / 1000)
+      except queue.Empty:
+        pass
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--server",
+      default="127.0.0.1",
+      help="Address of stream server.")
+  parser.add_argument(
+      "--port",
+      default=8001,
+      help="Port of the server to connect to.")
+  FLAGS, _ = parser.parse_known_args()
+  logging.set_verbosity(logging.DEBUG)
+  # Use the parsed flags instead of hardcoded defaults.
+  client = Client(FLAGS.server, int(FLAGS.port))
+  client.start()
diff --git a/modules/GSOC/E3_Streamer/experimental/stream_server.py b/modules/GSOC/E3_Streamer/experimental/stream_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9aed69b57208914cf2c3df88373502035b5bea5
--- /dev/null
+++ b/modules/GSOC/E3_Streamer/experimental/stream_server.py
@@ -0,0 +1,103 @@
+import socket
+from absl import logging
+from functools import partial
+import threading
+import datapacket_pb2
+import tensorflow as tf
+from moviepy import editor
+
+
+class StreamClient(object):
+  SYN = b'SYN'
+  SYNACK = b'SYN/ACK'
+  ACK = b'ACK'
+
+  def __init__(self, client_socket, client_address, video_path):
+    self.video_object = editor.VideoFileClip(video_path)
+    self.audio_object = self.video_object.audio
+    dimension = list(self.video_object.get_frame(0).shape)
+    self.metadata = datapacket_pb2.Metadata(
+        duration=int(self.video_object.duration),
+        video_fps=int(self.video_object.fps),
+        audio_fps=int(self.audio_object.fps),
+        dimension=dimension)
+    self._client_socket = client_socket
+    self._client_address = client_address
+    self._video_iterator = self.video_object.iter_frames()
+    self._audio_iterator = self.audio_object.iter_chunks(self.metadata.audio_fps)
+    self.send_video()
+
+  def _handshake(self):
+    logging.debug("Awaiting Handshake")
+    data = self._client_socket.recv(128)
+    if data == StreamClient.SYN:
+      logging.debug("SYN Recieved. Sending Media Metadata")
+      num_bytes = self._client_socket.send(self.metadata.SerializeToString()+b'<END>')
+      logging.debug("Metadata sent: Num Bytes Written: %d" % num_bytes)
+      logging.debug("Awaiting ACK")
+      data = self._client_socket.recv(128)
+      logging.debug("Data Recieved")
+      if data == StreamClient.ACK:
+        logging.debug("ACK Recieved")
+        return True
+    return data
+
+  def _video_second(self):
+    def shrink_fn(image):
+      # Placeholder: currently serializes the frame as-is; bicubic
+      # downscaling before sending would go here.
+      image = tf.convert_to_tensor(image)
+      return image.numpy().tostring()
+    frames = []
+    for _ in range(int(self.metadata.video_fps)):
+      frames.append(shrink_fn(next(self._video_iterator)))
+    return frames
+
+  def _fetch_video(self):
+    try:
+      audio = next(self._audio_iterator).astype("float32").tostring()
+      video = self._video_second()
+      frame_packet = datapacket_pb2.FramePacket(
+          video_frames=video,
+          audio_chunk=audio)
+      return frame_packet
+    except StopIteration:
+      pass
+
+  def send_video(self):
+    sent = False
+    while not sent:
+      handshake = self._handshake()
+      if handshake:
+        if handshake is not True:
+          logging.info("[%s] Says: %s" % (self._client_address, handshake))
+        else:
+          video_packet = self._fetch_video()
+          while video_packet:
+            num_bytes = self._client_socket.send(
+                video_packet.SerializeToString() + b'<END>')
+            video_packet = self._fetch_video()
+            logging.debug("Sending: %d" % num_bytes)
+          sent = True
+      self._client_socket.send(b'<EOF>')
+
+
+class Server(object):
+  def __init__(self, ip, port):
+    self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    self._server_socket.bind((ip, port))
+    self._server_socket.listen()
+    self._client_template = partial(threading.Thread, target=StreamClient)
+
+  def run(self):
+    while True:
+      client_socket, client_addr = self._server_socket.accept()
+      client_addr = list(map(str, client_addr))
+      video_path = "/home/rick/video.mp4"
+      self._client_template(
+          args=(client_socket, client_addr, video_path)
+      ).start()
+      logging.info("[SERVER]: %s connected." % ":".join(client_addr))
+
+
+if __name__ == "__main__":
+  logging.set_verbosity(logging.DEBUG)
+  server = Server("127.0.0.1", 8001)
+  server.run()
diff --git a/modules/GSOC/E3_Streamer/player.py b/modules/GSOC/E3_Streamer/player.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d287f3f035544b9457c3466f0ff0bbc02377400
--- /dev/null
+++ b/modules/GSOC/E3_Streamer/player.py
@@ -0,0 +1,208 @@
+from absl import logging
+import argparse
+import tensorflow as tf
+import tensorflow_hub as hub
+import os
+from PIL import Image
+from multiprocessing.pool import ThreadPool
+from functools import partial
+import time
+import pyaudio as pya
+import threading
+import queue
+import numpy as np
+from moviepy import editor
+import pygame
+pygame.init()
+os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
+BUFFER_SIZE = 8
+
+
+class Player(object):
+  def __init__(self, videofile, tflite="", saved_model=""):
+    """
+      Player Class for the Video
+      Args:
+        videofile: Path to the video file
+        tflite: Path to the Super Resolution TFLite
+        saved_model: path to Super Resolution SavedModel
+    """
+    self.video = editor.VideoFileClip(videofile)
+    self.audio = self.video.audio
+    self.tolerance = 2.25  # Higher Tolerance Faster Video
+    self.running = False
+    self.interpreter = None
+    self.saved_model = None
+    if saved_model:
+      self.saved_model = hub.load(saved_model)
+    if tflite:
+      self.interpreter = tf.lite.Interpreter(model_path=tflite)
+      self.interpreter.allocate_tensors()
+      self.input_details = self.interpreter.get_input_details()
+      self.output_details = self.interpreter.get_output_details()
+    self.lock = threading.Lock()
+    self.audio_thread = threading.Thread(target=self.write_audio_stream)
+    self.video_thread = threading.Thread(target=self.write_video_stream)
+    self.video_iterator = self.video.iter_frames()
+    self.audio_iterator = self.audio.iter_chunks(int(self.audio.fps))
+    self.video_queue = queue.Queue()
+    self.audio_queue = queue.Queue()
+    pyaudio = pya.PyAudio()
+    # Show the full 1280x720 output when a super resolution model is given,
+    # otherwise show the raw 4x-smaller frames.
+    screen_scale = 1 if saved_model or tflite else 0.25
+    self.screen = pygame.display.set_mode(
+        (int(1280 * screen_scale),
+         int(720 * screen_scale)), 0, 32)
+    self.stream = pyaudio.open(
+        format=pya.paFloat32,
+        channels=2,
+        rate=44100,
+        output=True,
+        frames_per_buffer=1024)
+
+  def tflite_super_resolve(self, frame):
+    """
+      Super Resolve bicubically downsampled image frames
+      using the TFLite of the model.
+      Args:
+        frame: Image frame to scale up.
+    """
+    self.interpreter.set_tensor(self.input_details[0]['index'], frame)
+    self.interpreter.invoke()
+    frame = self.interpreter.get_tensor(self.output_details[0]['index'])
+    frame = tf.squeeze(tf.cast(tf.clip_by_value(frame, 0, 255), "uint8"))
+    return frame.numpy()
+
+  def saved_model_super_resolve(self, frame):
+    """
+      Super Resolve using exported SavedModel.
+      Args:
+        frame: batch containing the single frame to scale up.
+    """
+    if self.saved_model:
+      start = time.time()
+      frame = self.saved_model.call(frame)
+      logging.debug("[SAVED_MODEL] Super Resolving Time: %f" % (time.time() - start))
+    logging.debug("Returning Modified Frames")
+    return np.squeeze(np.clip(frame.numpy(), 0, 255).astype("uint8"))
+
+  def video_second(self):
+    """
+      Fetch Video Frames for each second
+      and super resolve them accordingly.
+    """
+    frames = []
+    logging.debug("Fetching Frames")
+    start = time.time()
+    loop_time = time.time()
+    for _ in range(int(self.video.fps)):
+      logging.debug("Fetching Video Frame. %f" % (time.time() - loop_time))
+      loop_time = time.time()
+      frame = next(self.video_iterator)
+      frame = np.asarray(
+          Image.fromarray(frame)
+          .resize(
+              [1280 // 4, 720 // 4],
+              Image.BICUBIC), dtype="float32")
+
+      frames.append(tf.expand_dims(frame, 0))
+    logging.debug("Frame Fetching Time: %f" % (time.time() - start))
+    if self.interpreter and not self.saved_model:
+      resolution_fn = self.tflite_super_resolve
+    else:
+      resolution_fn = self.saved_model_super_resolve
+    start = time.time()
+    with ThreadPool(30) as pool:
+      frames = pool.map(resolution_fn, frames)
+    logging.debug("Fetched Frames. Time: %f" % (time.time() - start))
+    return frames
+
+  def fetch_video(self):
+    """
+      Fetches audio and video frames from the file.
+      And put them in player cache.
+    """
+    audio = next(self.audio_iterator)
+    video = self.video_second()
+    self.audio_queue.put(audio)
+    self.video_queue.put(video)
+
+  def write_audio_stream(self):
+    """
+      Write Audio Frames to default audio device.
+    """
+    # Wait for the buffer to fill, then drain it to the audio device.
+    while self.audio_queue.qsize() < BUFFER_SIZE:
+      continue
+    while self.running:
+      audio = self.audio_queue.get(timeout=10)
+      self.stream.write(audio.astype("float32").tostring())
+
+  def write_video_stream(self):
+    """
+      Write Video frames to the player display.
+    """
+    # Wait for the buffer to fill, then render frames at the video frame rate.
+    while self.video_queue.qsize() < BUFFER_SIZE:
+      continue
+    while self.running:
+      logging.info("Displaying Frame")
+      for video_frame in self.video_queue.get(timeout=10):
+        video_frame = pygame.surfarray.make_surface(
+            np.rot90(np.fliplr(video_frame)))
+        self.screen.fill((0, 0, 2))
+        self.screen.blit(video_frame, (0, 0))
+        pygame.display.update()
+        time.sleep((1000 / self.video.fps - self.tolerance) / 1000)
+
+  def run(self):
+    """
+      Start the player threads and the frame streaming simulator.
+    """
+    with self.lock:
+      if not self.running:
+        self.running = True
+    self.audio_thread.start()
+    self.video_thread.start()
+    for _ in range(int(self.video.duration)):
+      logging.debug("Fetching Video")
+      self.fetch_video()
+      time.sleep(0.1)
+    # Signal the playback threads to stop once the whole video is queued.
+    with self.lock:
+      if self.running:
+        self.running = False
+    self.audio_thread.join()
+    self.video_thread.join()
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "-v", "--verbose",
+      action="count",
+      default=0,
+      help="Increases Verbosity of Logging")
+  parser.add_argument(
+      "--file",
+      default=None,
+      help="File to play")
+  parser.add_argument(
+      "--tflite",
+      default="",
+      help="Path to TFLite File")
+  parser.add_argument(
+      "--saved_model",
+      default="",
+      help="Path to Saved Model File")
+  FLAGS, unknown_args = parser.parse_known_args()
+  log_levels = [logging.FATAL, logging.WARNING, logging.INFO, logging.DEBUG]
+  current_log_level = log_levels[min(len(log_levels) - 1, FLAGS.verbose)]
+  logging.set_verbosity(current_log_level)
+  player = Player(
+      videofile=FLAGS.file,
+      saved_model=FLAGS.saved_model,
+      tflite=FLAGS.tflite)
+  player.run()
diff --git a/modules/GSOC/LICENSE b/modules/GSOC/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..539d4b485ca917cc3aa1e8c48f284fa3428dd17b
--- /dev/null
+++ b/modules/GSOC/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Adrish Dey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/modules/GSOC/README.md b/modules/GSOC/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc76476a6537ad0189f41e39f65d382b2cacd048
--- /dev/null
+++ b/modules/GSOC/README.md
@@ -0,0 +1,26 @@
+# GSOC
+Repository for Google Summer of Code 2019 at TensorFlow
+---------------------------------------------
+### GSOC Project Page
+https://summerofcode.withgoogle.com/projects/#4662790671826944
+### Mentors
+- Sachin Joglekar ([@srjoglekar246](https://github.com/srjoglekar246))
+- Vojtech Bardiovsky ([@vbardiovskyg](https://github.com/vbardiovskyg))
+
+### Blogs Written
+- Training TF2.0 models on TPUs: https://github.com/captain-pool/GSOC/wiki/Training-TF-2.0-Models-on-TPUs
+### Tasks
+|Evaluation|Task|Link|Done|
+|:-:|:-:|:-:|:-:|
+|E1|Sample TF Hub Module Deploy.|[Here](E1_TFHub_Sample_Deploy)| :heavy_check_mark: |
+|E1|Image Retraining with TF Hub, TF 2.0 and Cloud TPU|[Here](E1_TPU_Sample)|  :heavy_check_mark: |
+|E1|Convert ShuffleNet from ONNX to SavedModel 2.0|[Here](E1_ShuffleNet)| [:warning:](https://github.com/captain-pool/GSOC/issues/3) |
+|E2|Train ESRGAN Model from Scratch and Export as TF Hub Module|[Here](E2_ESRGAN)|:heavy_check_mark:|
+|E2|Adding Support to SavedModel 2.0 in `saved_model_cli`|[Here](https://github.com/tensorflow/tensorflow/pull/30752)|:heavy_check_mark:|
+|E3|Add Sample Notebook demonstrating usage of ESRGAN TF Hub Module|[Here](https://www.tensorflow.org/hub/tutorials/image_enhancing)|:heavy_check_mark:|
+|E3|Knowledge Distillation of ESRGAN|[Here](E3_Distill_ESRGAN)|:heavy_check_mark:|
+|E3| Proof of concept video player for real time video frame Super Resolution|[Here](E3_Streamer)|:heavy_check_mark:|
+
+## UPDATE
+ESRGAN is now published on [tfhub.dev](https://tfhub.dev).
+Link: [https://tfhub.dev/captain-pool/esrgan-tf2/1](https://tfhub.dev/captain-pool/esrgan-tf2/1)
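+
+A minimal sketch of loading the published module (assuming the `[1, H, W, 3]` float32, `[0, 255]`-range input convention used throughout this repository):
+
+```python
+import tensorflow as tf
+import tensorflow_hub as hub
+
+model = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
+low_res = tf.random.normal([1, 128, 128, 3])  # placeholder input batch
+super_res = model(low_res)                    # 4x upscaled output
+```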
diff --git a/modules/GSOC/WORKSPACE b/modules/GSOC/WORKSPACE
new file mode 100644
index 0000000000000000000000000000000000000000..c3cd5d7fb7c77ec287e45c0113a414de99338861
--- /dev/null
+++ b/modules/GSOC/WORKSPACE
@@ -0,0 +1,76 @@
+# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+workspace(name = "gsoc")
+
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+git_repository(
+    name = "org_tensorflow_hub",
+    commit = "eeefe60480d8471b1662259f8eb5a965eba13d18",
+    remote = "https://github.com/tensorflow/hub.git",
+)
+
+bind(
+    name = "tensorflow_hub",
+    actual = "@org_tensorflow_hub//tensorflow_hub",
+)
+
+git_repository(
+    name = "protobuf_bzl",
+    # v3.6.1.3
+    commit = "66dc42d891a4fc8e9190c524fd67961688a37bbe",
+    remote = "https://github.com/google/protobuf.git",
+)
+
+bind(
+    name = "protobuf",
+    actual = "@protobuf_bzl//:protobuf",
+)
+
+bind(
+    name = "protobuf_python",
+    actual = "@protobuf_bzl//:protobuf_python",
+)
+
+bind(
+    name = "protobuf_python_genproto",
+    actual = "@protobuf_bzl//:protobuf_python_genproto",
+)
+
+bind(
+    name = "protoc",
+    actual = "@protobuf_bzl//:protoc",
+)
+
+# Using protobuf version 3.6.1.3
+http_archive(
+    name = "com_google_protobuf",
+    strip_prefix = "protobuf-3.6.1.3",
+    urls = ["https://github.com/google/protobuf/archive/v3.6.1.3.zip"],
+)
+
+# required by protobuf_python
+http_archive(
+    name = "six_archive",
+    build_file = "@protobuf_bzl//:six.BUILD",
+    sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
+    url = "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz#md5=34eed507548117b2ab523ab14b2f8b55",
+)
+
+bind(
+    name = "six",
+    actual = "@six_archive//:six",
+)