chore(models): Finishes annotated typing.
gugarosa committed Apr 22, 2022
1 parent cac4f71 commit 81426d1
Showing 7 changed files with 347 additions and 261 deletions.
nalp/models/dcgan.py (24 changes: 13 additions & 11 deletions)
@@ -1,6 +1,8 @@
"""Deep Convolutional Generative Adversarial Network.
"""

from typing import Optional, Tuple

from nalp.core import Adversarial
from nalp.models.discriminators import ConvDiscriminator
from nalp.models.generators import ConvGenerator
@@ -21,20 +23,20 @@ class DCGAN(Adversarial):

     def __init__(
         self,
-        input_shape=(28, 28, 1),
-        noise_dim=100,
-        n_samplings=3,
-        alpha=0.3,
-        dropout_rate=0.3,
-    ):
+        input_shape: Optional[Tuple[int, int, int]] = (28, 28, 1),
+        noise_dim: Optional[int] = 100,
+        n_samplings: Optional[int] = 3,
+        alpha: Optional[float] = 0.3,
+        dropout_rate: Optional[float] = 0.3,
+    ) -> None:
         """Initialization method.

         Args:
-            input_shape (tuple): An input shape for the Generator.
-            noise_dim (int): Amount of noise dimensions for the Generator.
-            n_samplings (int): Number of down/up samplings to perform.
-            alpha (float): LeakyReLU activation threshold.
-            dropout_rate (float): Dropout activation rate.
+            input_shape: An input shape for the Generator.
+            noise_dim: Amount of noise dimensions for the Generator.
+            n_samplings: Number of down/up samplings to perform.
+            alpha: LeakyReLU activation threshold.
+            dropout_rate: Dropout activation rate.

         """

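With the typed signature in place, a minimal usage sketch (illustrative, not part of this commit; it simply exercises the annotated defaults above):

    from nalp.models.dcgan import DCGAN

    # Defaults mirror the annotated signature: 28x28x1 inputs, a
    # 100-dimensional noise vector, and three down/up samplings.
    dcgan = DCGAN(
        input_shape=(28, 28, 1),
        noise_dim=100,
        n_samplings=3,
        alpha=0.3,
        dropout_rate=0.3,
    )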
nalp/models/gan.py (18 changes: 13 additions & 5 deletions)
@@ -1,6 +1,8 @@
"""Generative Adversarial Network.
"""

from typing import Optional, Tuple

from nalp.core import Adversarial
from nalp.models.discriminators import LinearDiscriminator
from nalp.models.generators import LinearGenerator
@@ -18,14 +20,20 @@ class GAN(Adversarial):
"""

def __init__(self, input_shape=(784,), noise_dim=100, n_samplings=3, alpha=0.01):
def __init__(
self,
input_shape: Optional[Tuple[int, ...]] = (784,),
noise_dim: Optional[int] = 100,
n_samplings: Optional[int] = 3,
alpha: Optional[float] = 0.01,
) -> None:
"""Initialization method.
Args:
input_shape (tuple): An input shape for the Generator.
noise_dim (int): Amount of noise dimensions for the Generator.
n_samplings (int): Number of down/up samplings to perform.
alpha (float): LeakyReLU activation threshold.
input_shape: An input shape for the Generator.
noise_dim: Amount of noise dimensions for the Generator.
n_samplings: Number of down/up samplings to perform.
alpha: LeakyReLU activation threshold.
"""

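The new annotations are also visible to runtime tooling; a quick sketch (hypothetical session, assuming the package imports cleanly):

    from typing import get_type_hints

    from nalp.models.gan import GAN

    gan = GAN(input_shape=(784,), noise_dim=100, n_samplings=3, alpha=0.01)

    # The hints added by this commit are retrievable at runtime, e.g.
    # {'input_shape': typing.Optional[typing.Tuple[int, ...]], ...,
    #  'return': <class 'NoneType'>}.
    print(get_type_hints(GAN.__init__))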
nalp/models/gsgan.py (90 changes: 52 additions & 38 deletions)
@@ -1,10 +1,14 @@
"""Gumbel-Softmax Generative Adversarial Network.
"""

from typing import Optional, Tuple

import tensorflow as tf
from tensorflow.keras.utils import Progbar

from nalp.core import Adversarial
from nalp.core.dataset import Dataset
from nalp.encoders.integer import IntegerEncoder
from nalp.models.discriminators import LSTMDiscriminator
from nalp.models.generators import GumbelLSTMGenerator
from nalp.utils import logging
@@ -24,16 +28,21 @@ class GSGAN(Adversarial):
"""

     def __init__(
-        self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=64, tau=5
-    ):
+        self,
+        encoder: Optional[IntegerEncoder] = None,
+        vocab_size: Optional[int] = 1,
+        embedding_size: Optional[int] = 32,
+        hidden_size: Optional[int] = 64,
+        tau: Optional[float] = 5,
+    ) -> None:
         """Initialization method.

         Args:
-            encoder (IntegerEncoder): An index to vocabulary encoder for the generator.
-            vocab_size (int): The size of the vocabulary for both discriminator and generator.
-            embedding_size (int): The size of the embedding layer for both discriminator and generator.
-            hidden_size (int): The amount of hidden neurons for the generator.
-            tau (float): Gumbel-Softmax temperature parameter.
+            encoder: An index to vocabulary encoder for the generator.
+            vocab_size: The size of the vocabulary for both discriminator and generator.
+            embedding_size: The size of the embedding layer for both discriminator and generator.
+            hidden_size: The amount of hidden neurons for the generator.
+            tau: Gumbel-Softmax temperature parameter.

         """

@@ -56,32 +65,37 @@ def __init__(
logger.info("Class overrided.")

@property
def vocab_size(self):
"""int: The size of the vocabulary."""
def vocab_size(self) -> int:
"""The size of the vocabulary."""

return self._vocab_size

@vocab_size.setter
def vocab_size(self, vocab_size):
def vocab_size(self, vocab_size: int) -> None:
self._vocab_size = vocab_size

@property
def init_tau(self):
"""float: Gumbel-Softmax initial temperature."""
def init_tau(self) -> float:
"""Gumbel-Softmax initial temperature."""

return self._init_tau

@init_tau.setter
def init_tau(self, init_tau):
def init_tau(self, init_tau: float) -> None:
self._init_tau = init_tau

def compile(self, pre_optimizer, d_optimizer, g_optimizer):
def compile(
self,
pre_optimizer: tf.keras.optimizers,
d_optimizer: tf.keras.optimizers,
g_optimizer: tf.keras.optimizers,
) -> None:
"""Main building method.
Args:
pre_optimizer (tf.keras.optimizers): An optimizer instance for pre-training the generator.
d_optimizer (tf.keras.optimizers): An optimizer instance for the discriminator.
g_optimizer (tf.keras.optimizers): An optimizer instance for the generator.
pre_optimizer: An optimizer instance for pre-training the generator.
d_optimizer: An optimizer instance for the discriminator.
g_optimizer: An optimizer instance for the generator.
"""

@@ -102,15 +116,15 @@ def compile(self, pre_optimizer, d_optimizer, g_optimizer):
self.history["D_loss"] = []
self.history["G_loss"] = []

def generate_batch(self, x):
def generate_batch(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates a batch of tokens by feeding to the network the
current token (t) and predicting the next token (t+1).
Args:
x (tf.tensor): A tensor containing the inputs.
Returns:
A (batch_size, length) tensor of generated tokens and a
(Tuple[tf.Tensor, tf.Tensor]): A (batch_size, length) tensor of generated tokens and a
(batch_size, length, vocab_size) tensor of predictions.
"""
@@ -146,15 +160,15 @@ def generate_batch(self, x):

         return sampled_batch, sampled_preds

-    def _discriminator_loss(self, y_real, y_fake):
+    def _discriminator_loss(self, y_real: tf.Tensor, y_fake: tf.Tensor) -> tf.Tensor:
         """Calculates the loss out of the discriminator architecture.

         Args:
-            y_real (tf.tensor): A tensor containing the real data targets.
-            y_fake (tf.tensor): A tensor containing the fake data targets.
+            y_real: A tensor containing the real data targets.
+            y_fake: A tensor containing the fake data targets.

         Returns:
-            The loss based on the discriminator network.
+            (tf.Tensor): The loss based on the discriminator network.

         """

@@ -163,14 +177,14 @@ def _discriminator_loss(self, y_real, y_fake):

         return tf.reduce_mean(real_loss) + tf.reduce_mean(fake_loss)

-    def _generator_loss(self, y_fake):
+    def _generator_loss(self, y_fake: tf.Tensor) -> tf.Tensor:
         """Calculates the loss out of the generator architecture.

         Args:
-            y_fake (tf.tensor): A tensor containing the fake data targets.
+            y_fake: A tensor containing the fake data targets.

         Returns:
-            The loss based on the generator network.
+            (tf.Tensor): The loss based on the generator network.

         """

@@ -179,12 +193,12 @@ def _generator_loss(self, y_fake):
         return tf.reduce_mean(loss)

     @tf.function
-    def G_pre_step(self, x, y):
+    def G_pre_step(self, x: tf.Tensor, y: tf.Tensor) -> None:
         """Performs a single batch optimization pre-fitting step over the generator.

         Args:
-            x (tf.tensor): A tensor containing the inputs.
-            y (tf.tensor): A tensor containing the inputs' labels.
+            x: A tensor containing the inputs.
+            y: A tensor containing the inputs' labels.

         """

@@ -208,12 +222,12 @@ def G_pre_step(self, x, y):
         self.G_loss.update_state(loss)

     @tf.function
-    def step(self, x, y):
+    def step(self, x: tf.Tensor, y: tf.Tensor) -> None:
         """Performs a single batch optimization step.

         Args:
-            x (tf.tensor): A tensor containing the inputs.
-            y (tf.tensor): A tensor containing the inputs' labels.
+            x: A tensor containing the inputs.
+            y: A tensor containing the inputs' labels.

         """

@@ -246,12 +260,12 @@ def step(self, x, y):
         self.D_loss.update_state(D_loss)
         self.G_loss.update_state(G_loss)

-    def pre_fit(self, batches, epochs=100):
+    def pre_fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
         """Pre-trains the model.

         Args:
-            batches (Dataset): Pre-training batches containing samples.
-            epochs (int): The maximum number of pre-training epochs.
+            batches: Pre-training batches containing samples.
+            epochs: The maximum number of pre-training epochs.

         """

@@ -281,12 +295,12 @@ def pre_fit(self, batches, epochs=100):

         logger.to_file("Loss(G): %s", self.G_loss.result().numpy())

-    def fit(self, batches, epochs=100):
+    def fit(self, batches: Dataset, epochs: Optional[int] = 100) -> None:
         """Trains the model.

         Args:
-            batches (Dataset): Training batches containing samples.
-            epochs (int): The maximum number of training epochs.
+            batches: Training batches containing samples.
+            epochs: The maximum number of training epochs.

         """

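End to end, the annotated training entry points read as below (a sketch; building the Dataset from a corpus and encoder is outside this diff, and the epoch counts are illustrative):

    # `dataset` is assumed to be a nalp Dataset yielding (x, y) batches
    # of integer-encoded tokens.
    gsgan.pre_fit(dataset.batches, epochs=50)  # maximum-likelihood pre-training
    gsgan.fit(dataset.batches, epochs=100)     # adversarial training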
(Diffs for the remaining 4 changed files were not loaded.)
