
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing with 7603 additions and 0 deletions
{
"0": "10.90.41.130",
"1": "10.90.41.131",
"2": "10.90.41.132",
"3": "10.90.41.133"
}
\ No newline at end of file
{
"0": "10.90.41.129",
"1": "10.90.41.130",
"2": "10.90.41.131",
"3": "10.90.41.132",
"4": "10.90.41.133"
}
\ No newline at end of file
{
"0": "10.90.41.128",
"1": "10.90.41.129",
"2": "10.90.41.130",
"3": "10.90.41.131",
"4": "10.90.41.132",
"5": "10.90.41.133"
}
\ No newline at end of file
{
"0": "10.90.41.127",
"1": "10.90.41.128",
"2": "10.90.41.129",
"3": "10.90.41.130",
"4": "10.90.41.131",
"5": "10.90.41.132",
"6": "10.90.41.133"
}
\ No newline at end of file
Two further diffs are collapsed and not shown here.
import json
import os
import sys
from pathlib import Path

import numpy as np
from matplotlib import pyplot as plt


def plot(x, y, label, *args):
    plt.plot(x, y, *args, label=label)
    plt.legend()


def reject_outliers(data, m=2.0):
    # Drop values more than m median absolute deviations away from the median.
    d = np.abs(data - np.median(data))
    mdev = np.median(d)
    s = d / (mdev if mdev else 1.0)
    return data[s < m]


def plot_model(path, title):
    model_path = os.path.join(path, "plots")
    Path(model_path).mkdir(parents=True, exist_ok=True)
    files = [f for f in os.listdir(path) if f.endswith("json")]
    for file in files:
        filepath = os.path.join(path, file)
        with open(filepath, "r") as inf:
            model_vec = json.load(inf)
            del model_vec["order"]
            del model_vec["shapes"]
            model_vec = np.array(model_vec[list(model_vec.keys())[0]])
        num_elements = model_vec.shape[0]
        x_axis = np.arange(1, num_elements + 1)
        plt.clf()
        plt.title(title)
        plot(x_axis, model_vec, "unsorted", ".")
        model_vec = np.sort(model_vec)
        plot(x_axis, model_vec, "sorted")
        plt.savefig(os.path.join(model_path, file[0:-5]))


def plot_ratio(path_change, path_val, title):
    model_path = os.path.join(path_change, "plots_ratio")
    Path(model_path).mkdir(parents=True, exist_ok=True)
    files_change = [f for f in os.listdir(path_change) if f.endswith("json")]
    files_val = [f for f in os.listdir(path_val) if f.endswith("json")]
    for i, file in enumerate(files_change):
        print("Processed ", file)
        filepath_change = os.path.join(path_change, file)
        filepath_val = os.path.join(path_val, files_val[i])
        with open(filepath_change, "r") as inf:
            model_change = json.load(inf)
            del model_change["order"]
            del model_change["shapes"]
            model_change = np.array(model_change[list(model_change.keys())[0]])
        with open(filepath_val, "r") as inf:
            model_val = json.load(inf)
            del model_val["order"]
            del model_val["shapes"]
            model_val = np.array(model_val[list(model_val.keys())[0]])
        num_elements = model_val.shape[0]
        x_axis = np.arange(1, num_elements + 1)
        plt.clf()
        plt.title(title)
        # Element-wise change/value ratio; positions where the parameter value is 0 stay 0.
        model_vec = np.divide(
            model_change,
            model_val,
            out=np.zeros_like(model_change),
            where=model_val != 0.0,
        )
        model_vec = reject_outliers(model_vec)
        num_elements = model_vec.shape[0]
        x_axis = np.arange(1, num_elements + 1)
        plot(x_axis, model_vec, "unsorted", ".")
        model_vec = np.sort(model_vec)
        plot(x_axis, model_vec, "sorted")
        plt.savefig(os.path.join(model_path, file[0:-5]))


if __name__ == "__main__":
    assert len(sys.argv) == 3
    plot_model(
        os.path.join(sys.argv[1], "model_change", sys.argv[2]), "Change in Weights"
    )
    plot_model(os.path.join(sys.argv[1], "model_val", sys.argv[2]), "Model Parameters")
    plot_ratio(
        os.path.join(sys.argv[1], "model_change", sys.argv[2]),
        os.path.join(sys.argv[1], "model_val", sys.argv[2]),
        "Ratio",
    )
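The script above only defines the per-iteration JSON layout implicitly: it deletes the "order" and "shapes" entries and treats the single remaining entry as the flattened parameter vector. The sketch below writes one input file in that assumed format; only the "order" and "shapes" keys are confirmed by the code, the other names and values are illustrative assumptions.

    import json
    import os

    os.makedirs("results/model_val/0", exist_ok=True)

    snapshot = {
        "order": ["fc1.weight", "fc1.bias"],   # layer ordering; deleted before plotting
        "shapes": [[4, 2], [4]],               # tensor shapes; deleted before plotting
        # The remaining key holds the flattened parameter vector that gets plotted.
        "params": [0.1, -0.2, 0.05, 0.3, 0.0, 0.7, -0.1, 0.2, 0.4, 0.9, -0.5, 0.6],
    }
    with open("results/model_val/0/iteration_10.json", "w") as f:
        json.dump(snapshot, f)

    # The script additionally expects results/model_change/0/*.json in the same
    # format, and is invoked as:  python <this_script>.py results 0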
import json
import os
import sys

import numpy as np
import torch
from matplotlib import pyplot as plt


def get_stats(l):
    assert len(l) > 0
    mean_dict, stdev_dict, min_dict, max_dict = {}, {}, {}, {}
    for key in l[0].keys():
        all_nodes = [i[key] for i in l]
        all_nodes = np.array(all_nodes)
        mean = np.mean(all_nodes)
        std = np.std(all_nodes)
        min = np.min(all_nodes)
        max = np.max(all_nodes)
        mean_dict[int(key)] = mean
        stdev_dict[int(key)] = std
        min_dict[int(key)] = min
        max_dict[int(key)] = max
    return mean_dict, stdev_dict, min_dict, max_dict


def plot(means, stdevs, mins, maxs, title, label, loc):
    plt.title(title)
    plt.xlabel("communication rounds")
    x_axis = list(means.keys())
    y_axis = list(means.values())
    err = list(stdevs.values())
    plt.errorbar(x_axis, y_axis, yerr=err, label=label)
    plt.legend(loc=loc)


def plot_results(path):
    """
    Plots the percentiles of the shared parameters.

    Based on plot.py.

    Parameters
    ----------
    path
        Path to the folders from which to create the percentile plots.

    """
    folders = os.listdir(path)
    folders.sort()
    print("Reading folders from: ", path)
    print("Folders: ", folders)
    for folder in folders:
        folder_path = os.path.join(path, folder)
        if not os.path.isdir(folder_path):
            continue
        results = []
        all_shared_params = []
        machine_folders = os.listdir(folder_path)
        for machine_folder in machine_folders:
            mf_path = os.path.join(folder_path, machine_folder)
            if not os.path.isdir(mf_path):
                continue
            files = os.listdir(mf_path)
            shared_params = [f for f in files if f.endswith("_shared_parameters.json")]
            files = [f for f in files if f.endswith("_results.json")]
            for f in files:
                filepath = os.path.join(mf_path, f)
                with open(filepath, "r") as inf:
                    results.append(json.load(inf))
            for sp in shared_params:
                filepath = os.path.join(mf_path, sp)
                with open(filepath, "r") as spf:
                    all_shared_params.append(np.array(json.load(spf), dtype=np.int32))

        # Figure 1: percentile curve of the per-parameter share counts, averaged over nodes.
        plt.figure(1)
        # Average and standard deviation of the shared parameters across nodes.
        mean = np.mean(all_shared_params, axis=0)
        std = np.std(all_shared_params, axis=0)
        with open(
            os.path.join(path, "shared_params_avg_" + folder + ".json"), "w"
        ) as mf:
            json.dump(mean.tolist(), mf)
        with open(
            os.path.join(path, "shared_params_std_" + folder + ".json"), "w"
        ) as sf:
            json.dump(std.tolist(), sf)
        percentile = np.percentile(mean, np.arange(0, 100, 1))
        plt.plot(np.arange(0, 100, 1), percentile, label=folder)
        plt.title("Shared parameters Percentiles")
        # plt.ylabel("Absolute frequency value")
        plt.xlabel("Percentiles")
        plt.xticks(np.arange(0, 110, 10))
        plt.legend(loc="lower right")

        # Figure 2: share-count mass per 5% bucket of the sorted parameters.
        plt.figure(2)
        sort = torch.sort(torch.tensor(mean)).values
        print(sort)
        length = sort.shape[0]
        length = int(length / 20)
        bins = [
            torch.sum(sort[length * i : length * (i + 1)]).item() for i in range(20)
        ]
        total = np.sum(bins)
        perc = bins / total  # np.divide(bins, total)
        print(perc)
        plt.bar(np.arange(0, 97.5, 5), perc, width=5, align="edge", label=folder)
        plt.title("Shared parameters Percentiles")
        # plt.ylabel("Absolute frequency value")
        plt.xlabel("Percentiles")
        plt.legend(loc="lower right")
        plt.savefig(os.path.join(path, f"percentiles_histogram_{folder}.png"), dpi=300)
        plt.clf()
        plt.cla()

    plt.figure(1)
    plt.savefig(os.path.join(path, "percentiles.png"), dpi=300)


if __name__ == "__main__":
    assert len(sys.argv) == 2
    plot_results(sys.argv[1])
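plot_results walks a top-level directory that contains one folder per experiment, each holding per-machine sub-folders with *_results.json and *_shared_parameters.json files. The sketch below builds a minimal layout of that shape; everything except the two file-name suffixes is an illustrative assumption inferred from the parsing code above.

    import json
    import os

    machine_dir = os.path.join("experiments", "run_configA", "machine0")
    os.makedirs(machine_dir, exist_ok=True)

    # Per-node results file; its contents are not used by the percentile plots above.
    with open(os.path.join(machine_dir, "0_results.json"), "w") as f:
        json.dump({}, f)

    # Per-node shared-parameter counts: one integer per model parameter (toy values).
    with open(os.path.join(machine_dir, "0_shared_parameters.json"), "w") as f:
        json.dump([i % 7 for i in range(40)], f)

    # Invoked as:  python <this_script>.py experiments
    # which writes percentiles.png plus one percentiles_histogram_<folder>.png per run.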
The remaining twelve diffs in this comparison are collapsed and not shown here.