diff --git a/eval/config_celeba.ini b/eval/epoch_configs/config_celeba.ini
similarity index 96%
rename from eval/config_celeba.ini
rename to eval/epoch_configs/config_celeba.ini
index 61d1b26d7e4a2439862957df524464de1709c97a..408782804c812ad5c3fbab0183f396dee1a24c65 100644
--- a/eval/config_celeba.ini
+++ b/eval/epoch_configs/config_celeba.ini
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
diff --git a/eval/config_celeba_100.ini b/eval/epoch_configs/config_celeba_100.ini
similarity index 96%
rename from eval/config_celeba_100.ini
rename to eval/epoch_configs/config_celeba_100.ini
index dcaff4f2146e795e58bc15af2c8253a1ebeba031..910fb2d63f93fae94e876e28e0de71cf5c7349e1 100644
--- a/eval/config_celeba_100.ini
+++ b/eval/epoch_configs/config_celeba_100.ini
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
diff --git a/eval/config_celeba_grow.ini b/eval/epoch_configs/config_celeba_grow.ini
similarity index 96%
rename from eval/config_celeba_grow.ini
rename to eval/epoch_configs/config_celeba_grow.ini
index 5ac10c166f69faaf88935bf794117eed29c14e96..28eaaaaa16fb06a07156df227afd96f3dd77aae1 100644
--- a/eval/config_celeba_grow.ini
+++ b/eval/epoch_configs/config_celeba_grow.ini
@@ -17,7 +17,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 512
 shuffle = True
 loss_package = torch.nn
diff --git a/eval/config_femnist.ini b/eval/epoch_configs/config_femnist.ini
similarity index 96%
rename from eval/config_femnist.ini
rename to eval/epoch_configs/config_femnist.ini
index ec213c71b6bc14cc59d297a248c0311536176591..63ad4622d2259555d22eff710a21ca26a7f8bee6 100644
--- a/eval/config_femnist.ini
+++ b/eval/epoch_configs/config_femnist.ini
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
diff --git a/eval/config_femnist_100.ini b/eval/epoch_configs/config_femnist_100.ini
similarity index 96%
rename from eval/config_femnist_100.ini
rename to eval/epoch_configs/config_femnist_100.ini
index c3f0ae2bab6e8756e438c640dcb06f149cfabb32..63c7f4f9aaf3b629ecb8cd96b7e8b82ce543c698 100644
--- a/eval/config_femnist_100.ini
+++ b/eval/epoch_configs/config_femnist_100.ini
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.Training
 training_class = Training
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
diff --git a/eval/config_femnist_grow.ini b/eval/epoch_configs/config_femnist_grow.ini
similarity index 96%
rename from eval/config_femnist_grow.ini
rename to eval/epoch_configs/config_femnist_grow.ini
index b9b0abead67226698276248c730fb3024627171e..e91b3aa6adb36346c07a7143ffa079c95f71e7eb 100644
--- a/eval/config_femnist_grow.ini
+++ b/eval/epoch_configs/config_femnist_grow.ini
@@ -16,7 +16,8 @@ lr = 0.001
 [TRAIN_PARAMS]
 training_package = decentralizepy.training.GradientAccumulator
 training_class = GradientAccumulator
-epochs_per_round = 5
+rounds = 5
+full_epochs = True
 batch_size = 1024
 shuffle = True
 loss_package = torch.nn
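The six renames above move the per-epoch configs into eval/epoch_configs/ and replace the single epochs_per_round knob with a rounds count plus a full_epochs flag: full_epochs = True keeps the old behavior (each round is a full pass over the local data), while the step configs introduced below set full_epochs = False so that a round is a single mini-batch step. The following minimal Python sketch shows how a trainer could interpret the pair; the function name and structure are illustrative assumptions, not decentralizepy's actual Training or GradientAccumulator API:

def train_rounds(model, loss_fn, optimizer, loader, rounds, full_epochs):
    # Hypothetical interpretation of the rounds/full_epochs pair; the
    # real decentralizepy training classes may differ in structure.
    model.train()
    if full_epochs:
        # Epoch mode (epoch_configs): rounds = 5 means five full passes
        # over the local dataset per communication round, matching the
        # old epochs_per_round = 5.
        for _ in range(rounds):
            for data, target in loader:
                optimizer.zero_grad()
                loss_fn(model(data), target).backward()
                optimizer.step()
    else:
        # Step mode (step_configs): rounds = 20 means twenty mini-batch
        # gradient steps per communication round.
        batches = iter(loader)
        for _ in range(rounds):
            try:
                data, target = next(batches)
            except StopIteration:
                batches = iter(loader)
                data, target = next(batches)
            optimizer.zero_grad()
            loss_fn(model(data), target).backward()
            optimizer.step()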
diff --git a/eval/ip_addr_7Machines.json b/eval/ip_addr_7Machines.json
new file mode 100644
index 0000000000000000000000000000000000000000..889afa03d3f0173318e51c13c3f3f2a17cc7c88e
--- /dev/null
+++ b/eval/ip_addr_7Machines.json
@@ -0,0 +1,9 @@
+{
+    "0": "10.90.41.127",
+    "1": "10.90.41.128",
+    "2": "10.90.41.129",
+    "3": "10.90.41.130",
+    "4": "10.90.41.131",
+    "5": "10.90.41.132",
+    "6": "10.90.41.133"
+}
\ No newline at end of file
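The new ip_addr_7Machines.json maps machine ranks to IP addresses and is consumed via the addresses_filepath key in the [COMMUNICATION] sections (config_femnist_100.ini below points at it; the other configs keep ip_addr_6Machines.json). A small sketch of resolving a peer address from the file; the lookup code is an assumption for illustration, not decentralizepy.communication.TCP itself:

import json

# Build a rank -> IP map from the address file added in this change;
# the real TCP layer may resolve peers differently.
with open("eval/ip_addr_7Machines.json") as f:
    addresses = {int(rank): ip for rank, ip in json.load(f).items()}

print(addresses[3])  # 10.90.41.130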
diff --git a/eval/step_configs/config_celeba.ini b/eval/step_configs/config_celeba.ini
new file mode 100644
index 0000000000000000000000000000000000000000..d58052e5cc37725c68cd04fb8b8723f1859b8917
--- /dev/null
+++ b/eval/step_configs/config_celeba.ini
@@ -0,0 +1,34 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Celeba
+dataset_class = Celeba
+model_class = CNN
+n_procs = 96
+images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
+train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
+test_dir = /home/risharma/leaf/data/celeba/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.GradientAccumulator
+training_class = GradientAccumulator
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_6Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.PartialModel
+sharing_class = PartialModel
diff --git a/eval/step_configs/config_celeba_100.ini b/eval/step_configs/config_celeba_100.ini
new file mode 100644
index 0000000000000000000000000000000000000000..2e351a47bcb148001c3895049b7f2d14458a43f0
--- /dev/null
+++ b/eval/step_configs/config_celeba_100.ini
@@ -0,0 +1,34 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Celeba
+dataset_class = Celeba
+model_class = CNN
+n_procs = 96
+images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
+train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
+test_dir = /home/risharma/leaf/data/celeba/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.Training
+training_class = Training
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_6Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.Sharing
+sharing_class = Sharing
diff --git a/eval/step_configs/config_celeba_grow.ini b/eval/step_configs/config_celeba_grow.ini
new file mode 100644
index 0000000000000000000000000000000000000000..a9cd92c46417dce619eb2fab64f3552722e5d8d4
--- /dev/null
+++ b/eval/step_configs/config_celeba_grow.ini
@@ -0,0 +1,38 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Celeba
+dataset_class = Celeba
+model_class = CNN
+n_procs = 96
+images_dir = /home/risharma/leaf/data/celeba/data/raw/img_align_celeba
+train_dir = /home/risharma/leaf/data/celeba/per_user_data/train
+test_dir = /home/risharma/leaf/data/celeba/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.GradientAccumulator
+training_class = GradientAccumulator
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_6Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.GrowingAlpha
+sharing_class = GrowingAlpha
+init_alpha=0.10
+max_alpha=0.75
+k=6
+metadata_cap=0.65
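config_celeba_grow.ini selects GrowingAlpha sharing with init_alpha=0.10, max_alpha=0.75, k=6, and metadata_cap=0.65. One plausible reading of these parameters is a linear ramp of the shared fraction alpha from init_alpha up to max_alpha over k rounds; the sketch below is that assumed schedule only, and decentralizepy's actual GrowingAlpha (including the role of metadata_cap) may differ:

def growing_alpha(step, init_alpha=0.10, max_alpha=0.75, k=6):
    # Assumed linear ramp: alpha grows from init_alpha to max_alpha
    # over the first k steps, then stays at max_alpha.
    frac = min(step / k, 1.0)
    return init_alpha + frac * (max_alpha - init_alpha)

for step in (0, 3, 6):
    print(step, growing_alpha(step))  # 0.1, 0.425, 0.75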
diff --git a/eval/step_configs/config_femnist.ini b/eval/step_configs/config_femnist.ini
new file mode 100644
index 0000000000000000000000000000000000000000..0bd4a55dbfd136bf70d5a1fe5ba23f3ca3d3d260
--- /dev/null
+++ b/eval/step_configs/config_femnist.ini
@@ -0,0 +1,33 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Femnist
+dataset_class = Femnist
+model_class = CNN
+n_procs = 16
+train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
+test_dir = /home/risharma/leaf/data/femnist/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.GradientAccumulator
+training_class = GradientAccumulator
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_6Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.PartialModel
+sharing_class = PartialModel
diff --git a/eval/step_configs/config_femnist_100.ini b/eval/step_configs/config_femnist_100.ini
new file mode 100644
index 0000000000000000000000000000000000000000..aafe7d6fcc0eab9a4a97162daae35f23177871ff
--- /dev/null
+++ b/eval/step_configs/config_femnist_100.ini
@@ -0,0 +1,33 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Femnist
+dataset_class = Femnist
+model_class = CNN
+n_procs = 16
+train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
+test_dir = /home/risharma/leaf/data/femnist/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.Training
+training_class = Training
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_7Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.Sharing
+sharing_class = Sharing
diff --git a/eval/step_configs/config_femnist_grow.ini b/eval/step_configs/config_femnist_grow.ini
new file mode 100644
index 0000000000000000000000000000000000000000..018cdc76da7417340637703348d08dc2d3c9c3d8
--- /dev/null
+++ b/eval/step_configs/config_femnist_grow.ini
@@ -0,0 +1,37 @@
+[DATASET]
+dataset_package = decentralizepy.datasets.Femnist
+dataset_class = Femnist
+model_class = CNN
+n_procs = 16
+train_dir = /home/risharma/leaf/data/femnist/per_user_data/train
+test_dir = /home/risharma/leaf/data/femnist/data/test
+; python list of fractions below
+sizes =
+
+[OPTIMIZER_PARAMS]
+optimizer_package = torch.optim
+optimizer_class = Adam
+lr = 0.001
+
+[TRAIN_PARAMS]
+training_package = decentralizepy.training.GradientAccumulator
+training_class = GradientAccumulator
+rounds = 20
+full_epochs = False
+batch_size = 64
+shuffle = True
+loss_package = torch.nn
+loss_class = CrossEntropyLoss
+
+[COMMUNICATION]
+comm_package = decentralizepy.communication.TCP
+comm_class = TCP
+addresses_filepath = ip_addr_6Machines.json
+
+[SHARING]
+sharing_package = decentralizepy.sharing.GrowingAlpha
+sharing_class = GrowingAlpha
+init_alpha=0.10
+max_alpha=0.75
+k=8
+metadata_cap=0.65
\ No newline at end of file
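All of the new .ini files parse with Python's standard configparser, which gives a quick way to sanity-check the values added in this change; decentralizepy's own loader may layer extra type conversion on top:

import configparser

# Read one of the new step configs and pull out the TRAIN_PARAMS
# keys introduced by this change.
config = configparser.ConfigParser()
config.read("eval/step_configs/config_femnist.ini")

train = config["TRAIN_PARAMS"]
print(train.getint("rounds"), train.getboolean("full_epochs"))  # 20 False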