Commit 39f34441 authored by Rishi Sharma

Add script and run.sh

parent 0ee4c0d2
@@ -20,6 +20,15 @@ Setting up decentralizepy
 pip3 install --editable .\[dev\]
 
+----------------
+Running the code
+----------------
+
+* Choose one of the config files in ``eval/{step,epoch}_configs`` and adjust it as needed.
+* Update the dataset paths and ``addresses_filepath`` in the config file.
+* In ``eval/run.sh``, set ``first_machine`` (used to compute the ``machine_id`` of each machine), ``original_config``, and any other arguments as required.
+* Execute ``eval/run.sh`` on all machines simultaneously; a synchronization barrier at the start ensures that all processes begin training together (see the sketch after this diff).
+
 Node
 ----
 * The Manager. Optimizations at process level.

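For context, here is a minimal, hypothetical sketch of how ``first_machine`` and ``addresses_filepath`` could fit together. The JSON layout of the addresses file and the machine-index arithmetic below are illustrative assumptions, not taken from the repository.

```python
import json
import sys

# Hypothetical layout of the file behind ``addresses_filepath``: a JSON
# object mapping machine_id -> IP address (format assumed for illustration).
with open(sys.argv[1]) as f:        # e.g. ip_addr.json (hypothetical name)
    addresses = json.load(f)

# Assumed arithmetic for ``first_machine``: if each host knows its own
# cluster index (passed here as the second argument), its machine_id is
# simply the offset from the first participating machine.
first_machine = 5                   # the value edited into eval/run.sh
my_cluster_index = int(sys.argv[2])
machine_id = my_cluster_index - first_machine
print(f"machine_id = {machine_id}, address = {addresses[str(machine_id)]}")
```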
@@ -17,9 +17,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 512
+batch_size = 64
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

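The remaining hunks repeat the same change across the other step/epoch configs. As a side note, sections like ``[TRAIN_PARAMS]`` follow standard INI syntax, so reading them with Python's standard-library ``configparser`` is straightforward. The sketch below is illustrative only (hypothetical file name, and not decentralizepy's actual config loader):

```python
import configparser
import importlib

# Minimal sketch: read a [TRAIN_PARAMS] section like the one above using
# the standard-library configparser.
config = configparser.ConfigParser()
config.read("epoch_config.ini")                 # hypothetical path

params = config["TRAIN_PARAMS"]
rounds = params.getint("rounds")                # 1 after this commit
full_epochs = params.getboolean("full_epochs")  # presumably: one round = one full epoch
batch_size = params.getint("batch_size")        # 64 after this commit

# training_package/training_class name the trainer class to instantiate
# (the dynamic import assumes decentralizepy is installed):
module = importlib.import_module(params["training_package"])
trainer_cls = getattr(module, params["training_class"])
print(rounds, full_epochs, batch_size, trainer_cls.__name__)
```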
@@ -17,9 +17,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 512
+batch_size = 64
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

@@ -17,9 +17,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 512
+batch_size = 64
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

@@ -16,9 +16,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 1024
+batch_size = 128
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

@@ -16,9 +16,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 1024
+batch_size = 128
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

@@ -16,9 +16,9 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-rounds = 5
+rounds = 1
 full_epochs = True
-batch_size = 1024
+batch_size = 128
 shuffle = True
 loss_package = torch.nn
 loss_class = CrossEntropyLoss

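The diff does not show what ``GradientAccumulator`` does, but the class name suggests the standard gradient-accumulation pattern. For background only, here is the generic PyTorch idiom, using the ``lr`` and loss class from these configs; ``accum_steps`` and the random data are placeholders, and this is not decentralizepy's implementation:

```python
import torch

# Generic gradient-accumulation pattern (background for the class name
# ``GradientAccumulator``; NOT decentralizepy's actual code).
model = torch.nn.Linear(10, 2)
loss_fn = torch.nn.CrossEntropyLoss()                 # loss_class from the configs
opt = torch.optim.SGD(model.parameters(), lr=0.001)   # lr from the configs
accum_steps = 4                                       # hypothetical

# Placeholder mini-batches of (inputs, labels).
data = [(torch.randn(8, 10), torch.randint(0, 2, (8,))) for _ in range(8)]

opt.zero_grad()
for step, (x, y) in enumerate(data, start=1):
    loss = loss_fn(model(x), y) / accum_steps   # scale so grads average out
    loss.backward()                             # gradients accumulate in .grad
    if step % accum_steps == 0:
        opt.step()                              # apply the accumulated gradient
        opt.zero_grad()
```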