Commit dcbe550a authored by Rishi Sharma

Modularize node, add step training

parent 50507efb
Showing with 230 additions and 6 deletions
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
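Across these configs the change is the same: the old epochs_per_round key becomes rounds, and a new full_epochs flag selects between epoch-based and step-based training (the "step training" of the commit message). Below is a minimal Python sketch of one plausible reading of the two keys; the function name and signature are illustrative assumptions, not the decentralizepy Training API.

# Illustrative sketch only: `rounds` counts full epochs when full_epochs is
# True, and individual minibatch steps otherwise. Assumed semantics.
def train(model, loss_fn, optimizer, loader, rounds=5, full_epochs=False):
    """Run `rounds` full epochs, or `rounds` single minibatch steps."""
    model.train()
    if full_epochs:
        for _ in range(rounds):
            for data, target in loader:
                optimizer.zero_grad()
                loss_fn(model(data), target).backward()
                optimizer.step()
    else:
        batches = iter(loader)
        for _ in range(rounds):
            try:
                data, target = next(batches)
            except StopIteration:          # restart the loader if it runs out
                batches = iter(loader)
                data, target = next(batches)
            optimizer.zero_grad()
            loss_fn(model(data), target).backward()
            optimizer.step()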
{
"0": "10.90.41.127",
"1": "10.90.41.128",
"2": "10.90.41.129",
"3": "10.90.41.130",
"4": "10.90.41.131",
"5": "10.90.41.132",
"6": "10.90.41.133"
}
\ No newline at end of file
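The JSON file above maps machine ranks to IP addresses; the configs below point at such files through addresses_filepath in their [COMMUNICATION] sections. A short Python sketch of how a rank-to-address map like this could be consumed; the file name and port arithmetic are assumptions for illustration, not project code.

# Illustration only: resolve this machine's address from the rank -> IP map.
import json

with open("ip_addr_7Machines.json") as f:           # assumed name for the file above
    machine_addresses = {int(rank): ip for rank, ip in json.load(f).items()}

machine_id = 2                                       # rank of the local machine
base_port = 20000                                    # assumed base port
local_ip = machine_addresses[machine_id]
print(f"machine {machine_id} listens on tcp://{local_ip}:{base_port + machine_id}")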
[DATASET]
dataset_package = decentralizepy.datasets.Celeba
dataset_class = Celeba
model_class = CNN
n_procs = 96
images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
test_dir = /home/risharma/leaf/data/celeba/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.GradientAccumulator
training_class = GradientAccumulator
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_6Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.PartialModel
sharing_class = PartialModel
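Every section in these configs pairs a *_package key with a *_class key, naming the module and the attribute to instantiate. A minimal sketch of resolving those pairs with configparser and importlib; the helper function and the config file name are hypothetical.

# Minimal sketch (assumed helper, assumed file name): turn the
# *_package / *_class pairs of a section into Python classes.
import importlib
from configparser import ConfigParser

def load_class(package_name, class_name):
    """Import `package_name` and return its attribute `class_name`."""
    return getattr(importlib.import_module(package_name), class_name)

config = ConfigParser()
config.read("config_celeba_partialmodel.ini")        # hypothetical file name

train_params = config["TRAIN_PARAMS"]
training_cls = load_class(train_params["training_package"],
                          train_params["training_class"])
loss_cls = load_class(train_params["loss_package"],
                      train_params["loss_class"])
rounds = train_params.getint("rounds")
full_epochs = train_params.getboolean("full_epochs")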
[DATASET]
dataset_package = decentralizepy.datasets.Celeba
dataset_class = Celeba
model_class = CNN
n_procs = 96
images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
test_dir = /home/risharma/leaf/data/celeba/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.Training
training_class = Training
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_6Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.Sharing
sharing_class = Sharing
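This variant uses the plain Sharing class rather than PartialModel, which suggests exchanging the full model with neighbours. A hedged sketch of what full-model averaging could look like, assuming each node receives its neighbours' state_dicts; this is an assumption about decentralizepy.sharing.Sharing, not its actual code.

# Hedged sketch: average the local state_dict with the state_dicts
# received from neighbours. Assumed behaviour, not project code.
def average_with_neighbours(model, neighbour_state_dicts):
    """Replace each parameter with the mean over self and all neighbours."""
    own = model.state_dict()
    total = len(neighbour_state_dicts) + 1
    averaged = {}
    for key, value in own.items():
        acc = value.clone().float()
        for sd in neighbour_state_dicts:
            acc = acc + sd[key].float()
        averaged[key] = (acc / total).to(value.dtype)
    model.load_state_dict(averaged)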
[DATASET]
dataset_package = decentralizepy.datasets.Celeba
dataset_class = Celeba
model_class = CNN
n_procs = 96
images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
test_dir = /home/risharma/leaf/data/celeba/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.GradientAccumulator
training_class = GradientAccumulator
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_6Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.GrowingAlpha
sharing_class = GrowingAlpha
init_alpha = 0.10
max_alpha = 0.75
k = 6
metadata_cap = 0.65
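The GrowingAlpha parameters suggest a schedule in which the shared fraction alpha starts at init_alpha and grows toward max_alpha, with k controlling how often it steps and metadata_cap presumably bounding the index/metadata overhead. A speculative sketch of such a schedule; the total_rounds parameter and the exact growth rule are guesses, not the GrowingAlpha implementation.

# Speculative schedule: alpha steps up once every k rounds, reaching
# max_alpha by an assumed total_rounds. Not the actual GrowingAlpha code.
def alpha_at_round(round_idx, total_rounds=60,
                   init_alpha=0.10, max_alpha=0.75, k=6):
    """Piecewise-constant alpha from init_alpha at round 0 to max_alpha."""
    total_steps = max(total_rounds // k, 1)
    completed_steps = min(round_idx // k, total_steps)
    return init_alpha + (max_alpha - init_alpha) * completed_steps / total_steps

for r in range(0, 61, 12):
    print(r, round(alpha_at_round(r), 3))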
[DATASET]
dataset_package = decentralizepy.datasets.Femnist
dataset_class = Femnist
model_class = CNN
n_procs = 16
train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
test_dir = /home/risharma/leaf/data/femnist/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.GradientAccumulator
training_class = GradientAccumulator
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_6Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.PartialModel
sharing_class = PartialModel
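PartialModel sharing implies that only a fraction of the model is exchanged each round. A hedged sketch of one common way to pick that fraction, by largest-magnitude entries of the flattened parameters; this is an assumption for illustration, not PartialModel's actual selection rule.

# Hedged sketch: keep the top `alpha` fraction of entries by magnitude and
# ship their indices plus values. Assumed behaviour, not PartialModel's code.
import torch

def select_top_fraction(flat_params, alpha=0.1):
    """Return (indices, values) of the largest-magnitude alpha fraction."""
    k = max(1, int(alpha * flat_params.numel()))
    _, indices = torch.topk(flat_params.abs(), k)
    return indices, flat_params[indices]

flat = torch.randn(1000)
idx, vals = select_top_fraction(flat, alpha=0.1)
print(idx.shape, vals.shape)        # 100 indices and 100 values out of 1000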
[DATASET]
dataset_package = decentralizepy.datasets.Femnist
dataset_class = Femnist
model_class = CNN
n_procs = 16
train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
test_dir = /home/risharma/leaf/data/femnist/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.Training
training_class = Training
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_7Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.Sharing
sharing_class = Sharing
[DATASET]
dataset_package = decentralizepy.datasets.Femnist
dataset_class = Femnist
model_class = CNN
n_procs = 16
train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
test_dir = /home/risharma/leaf/data/femnist/data/test
; python list of fractions below
sizes =
[OPTIMIZER_PARAMS]
optimizer_package = torch.optim
optimizer_class = Adam
lr = 0.001
[TRAIN_PARAMS]
training_package = decentralizepy.training.GradientAccumulator
training_class = GradientAccumulator
rounds = 20
full_epochs = False
batch_size = 64
shuffle = True
loss_package = torch.nn
loss_class = CrossEntropyLoss
[COMMUNICATION]
comm_package = decentralizepy.communication.TCP
comm_class = TCP
addresses_filepath = ip_addr_6Machines.json
[SHARING]
sharing_package = decentralizepy.sharing.GrowingAlpha
sharing_class = GrowingAlpha
init_alpha = 0.10
max_alpha = 0.75
k = 8
metadata_cap = 0.65
\ No newline at end of file