Commit cef92565 authored by Rishi Sharma

GradientAccumulator migration to steps

parent 0d50bc39
@@ -5,7 +5,7 @@ from decentralizepy.training.Training import Training

 class GradientAccumulator(Training):
     def __init__(
-        self, model, optimizer, loss, epochs_per_round="", batch_size="", shuffle=""
+        self, model, optimizer, loss, rounds="", full_epochs="", batch_size="", shuffle=""
     ):
         """
         Constructor
@@ -22,9 +22,9 @@ class GradientAccumulator(Training):
         batch_size : int, optional
             Number of items to learn over, in one batch
         shuffle : bool
-            True if the dataset should be shuffled before training. Not implemented yet! TODO
+            True if the dataset should be shuffled before training.
         """
-        super().__init__(model, optimizer, loss, epochs_per_round, batch_size, shuffle)
+        super().__init__(model, optimizer, loss, rounds, full_epochs, batch_size, shuffle)

     def trainstep(self, data, target):
         """
...
@@ -37,7 +37,7 @@ class Training:
         batch_size : int, optional
             Number of items to learn over, in one batch
         shuffle : bool
-            True if the dataset should be shuffled before training. Not implemented yet! TODO
+            True if the dataset should be shuffled before training.
         """
         self.model = model
         self.optimizer = optimizer
...
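
A minimal usage sketch (not part of the commit) showing how a caller might construct GradientAccumulator after this change. The import path, and the interpretation of the new parameters -- `rounds` counting optimizer steps per training round unless `full_epochs` is set -- are assumptions inferred from the commit title and the new signature shown in the diff.

    import torch

    # Assumed module path; only decentralizepy.training is confirmed by the diff.
    from decentralizepy.training.GradientAccumulator import GradientAccumulator

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss = torch.nn.CrossEntropyLoss()

    # Before this commit:
    #   GradientAccumulator(model, optimizer, loss, epochs_per_round=5, batch_size=16)

    # After this commit: train for 10 steps per round. With full_epochs=True,
    # `rounds` would presumably be interpreted as full passes over the dataset.
    trainer = GradientAccumulator(
        model, optimizer, loss, rounds=10, full_epochs=False, batch_size=16, shuffle=True
    )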