From cef92565f1428e6460cd16059366ee4f6554f0ae Mon Sep 17 00:00:00 2001
From: Rishi Sharma <rishi.sharma@epfl.ch>
Date: Thu, 20 Jan 2022 13:25:50 +0100
Subject: [PATCH] GradientAccumulator migration to steps

---
 src/decentralizepy/training/GradientAccumulator.py | 6 +++---
 src/decentralizepy/training/Training.py            | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/decentralizepy/training/GradientAccumulator.py b/src/decentralizepy/training/GradientAccumulator.py
index 23d4fdc..5022308 100644
--- a/src/decentralizepy/training/GradientAccumulator.py
+++ b/src/decentralizepy/training/GradientAccumulator.py
@@ -5,7 +5,7 @@ from decentralizepy.training.Training import Training
 
 class GradientAccumulator(Training):
     def __init__(
-        self, model, optimizer, loss, epochs_per_round="", batch_size="", shuffle=""
+        self, model, optimizer, loss, rounds="", full_epochs="", batch_size="", shuffle=""
     ):
         """
         Constructor
@@ -22,9 +22,9 @@ class GradientAccumulator(Training):
         batch_size : int, optional
             Number of items to learn over, in one batch
         shuffle : bool
-            True if the dataset should be shuffled before training. Not implemented yet! TODO
+            True if the dataset should be shuffled before training.
         """
-        super().__init__(model, optimizer, loss, epochs_per_round, batch_size, shuffle)
+        super().__init__(model, optimizer, loss, rounds, full_epochs, batch_size, shuffle)
 
     def trainstep(self, data, target):
         """
diff --git a/src/decentralizepy/training/Training.py b/src/decentralizepy/training/Training.py
index d5b3e9c..7d59435 100644
--- a/src/decentralizepy/training/Training.py
+++ b/src/decentralizepy/training/Training.py
@@ -37,7 +37,7 @@ class Training:
         batch_size : int, optional
             Number of items to learn over, in one batch
         shuffle : bool
-            True if the dataset should be shuffled before training. Not implemented yet! TODO
+            True if the dataset should be shuffled before training.
         """
         self.model = model
         self.optimizer = optimizer
-- 
GitLab
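
For context, a minimal usage sketch of the caller-side change this patch implies. The parameter names (rounds, full_epochs) come from the diff itself; their semantics (whether rounds counts gradient steps or full epochs, toggled by full_epochs) are an assumption inferred from the subject line "migration to steps" and are not stated in the diff. The model, optimizer, and loss values below are placeholders.

    # Usage sketch (assumptions flagged inline); not part of the patch itself.
    import torch
    from torch import nn

    from decentralizepy.training.GradientAccumulator import GradientAccumulator

    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss = nn.CrossEntropyLoss()

    # Before this patch:
    #   GradientAccumulator(model, optimizer, loss, epochs_per_round=5,
    #                       batch_size=32, shuffle=True)
    # After this patch, epochs_per_round is replaced by rounds + full_epochs:
    trainer = GradientAccumulator(
        model,
        optimizer,
        loss,
        rounds=5,           # assumption: number of training rounds per call
        full_epochs=False,  # assumption: False => rounds counts steps, True => epochs
        batch_size=32,
        shuffle=True,
    )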