Compare revisions
Showing 1818 additions and 109 deletions
import logging
from pathlib import Path
from shutil import copy
from localconfig import LocalConfig
from torch import multiprocessing as mp
from decentralizepy import utils
from decentralizepy.graphs.Graph import Graph
from decentralizepy.mappings.Linear import Linear
from decentralizepy.node.DPSGDWithPeerSampler import DPSGDWithPeerSampler
from decentralizepy.node.PeerSampler import PeerSampler
def read_ini(file_path):
config = LocalConfig(file_path)
for section in config:
print("Section: ", section)
for key, value in config.items(section):
print((key, value))
print(dict(config.items("DATASET")))
return config
if __name__ == "__main__":
args = utils.get_args()
Path(args.log_dir).mkdir(parents=True, exist_ok=True)
log_level = {
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
config = read_ini(args.config_file)
my_config = dict()
for section in config:
my_config[section] = dict(config.items(section))
copy(args.config_file, args.log_dir)
copy(args.graph_file, args.log_dir)
utils.write_args(args, args.log_dir)
g = Graph()
g.read_graph_from_file(args.graph_file, args.graph_type)
n_machines = args.machines
procs_per_machine = args.procs_per_machine
l = Linear(n_machines, procs_per_machine)
m_id = args.machine_id
sm = args.server_machine
sr = args.server_rank
processes = []
if sm == m_id:
processes.append(
mp.Process(
# target=PeerSamplerDynamic,
target=PeerSampler,
args=[
sr,
m_id,
l,
g,
my_config,
args.iterations,
args.log_dir,
log_level[args.log_level],
],
)
)
for r in range(0, procs_per_machine):
processes.append(
mp.Process(
target=DPSGDWithPeerSampler,
args=[
r,
m_id,
l,
g,
my_config,
args.iterations,
args.log_dir,
args.weights_store_dir,
log_level[args.log_level],
args.test_after,
args.train_evaluate_after,
args.reset_optimizer,
],
)
)
for p in processes:
p.start()
for p in processes:
p.join()
import logging
from pathlib import Path
from shutil import copy
from localconfig import LocalConfig
from torch import multiprocessing as mp
from decentralizepy import utils
from decentralizepy.graphs.Graph import Graph
from decentralizepy.mappings.Linear import Linear
from decentralizepy.node.DPSGDWithPeerSampler import DPSGDWithPeerSampler
from decentralizepy.node.PeerSamplerDynamic import PeerSamplerDynamic
def read_ini(file_path):
config = LocalConfig(file_path)
for section in config:
print("Section: ", section)
for key, value in config.items(section):
print((key, value))
print(dict(config.items("DATASET")))
return config
if __name__ == "__main__":
args = utils.get_args()
Path(args.log_dir).mkdir(parents=True, exist_ok=True)
log_level = {
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
config = read_ini(args.config_file)
my_config = dict()
for section in config:
my_config[section] = dict(config.items(section))
copy(args.config_file, args.log_dir)
copy(args.graph_file, args.log_dir)
utils.write_args(args, args.log_dir)
g = Graph()
g.read_graph_from_file(args.graph_file, args.graph_type)
n_machines = args.machines
procs_per_machine = args.procs_per_machine
l = Linear(n_machines, procs_per_machine)
m_id = args.machine_id
sm = args.server_machine
sr = args.server_rank
processes = []
if sm == m_id:
processes.append(
mp.Process(
target=PeerSamplerDynamic,
args=[
sr,
m_id,
l,
g,
my_config,
args.iterations,
args.log_dir,
log_level[args.log_level],
],
)
)
for r in range(0, procs_per_machine):
processes.append(
mp.Process(
target=DPSGDWithPeerSampler,
args=[
r,
m_id,
l,
g,
my_config,
args.iterations,
args.log_dir,
args.weights_store_dir,
log_level[args.log_level],
args.test_after,
args.train_evaluate_after,
args.reset_optimizer,
],
)
)
for p in processes:
p.start()
for p in processes:
p.join()
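The two launchers above differ only in the peer-sampler class started on the server machine; each parses an INI file with read_ini and hands every spawned node the same {section: {key: value}} dictionary (the node classes are used directly as mp.Process targets, so their constructors presumably run the whole node). Below is a minimal standalone sketch of that config-to-dict step using only the localconfig calls shown above; load_config_as_dict and "config.ini" are illustrative placeholders, not project names.

# Sketch: build the {section: {key: value}} structure passed to every node.
from localconfig import LocalConfig

def load_config_as_dict(file_path):
    config = LocalConfig(file_path)  # same parser call as read_ini above
    return {section: dict(config.items(section)) for section in config}

# my_config = load_config_as_dict("config.ini")  # hypothetical path; requires a real INI file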
@@ -45,13 +45,16 @@ install_requires =
PyWavelets
pandas
crudini
sklearn
lz4
fpzip
include_package_data = True
python_requires = >=3.6
[options.packages.find]
where = src
[options.extras_require]
dev =
black
black>22.3.0
coverage
isort
pytest
......
import json
import logging
import lzma
import pickle
from collections import deque
@@ -36,11 +35,19 @@ class TCP(Communication):
"""
machine_addr = self.ip_addrs[str(machine_id)]
port = rank + 20000
port = (2 * rank + 1) + self.offset
assert port > 0
return "tcp://{}:{}".format(machine_addr, port)
def __init__(
self, rank, machine_id, mapping, total_procs, addresses_filepath, compress=False
self,
rank,
machine_id,
mapping,
total_procs,
addresses_filepath,
offset=9000,
recv_timeout=50,
):
"""
Constructor
@@ -57,6 +64,10 @@ class TCP(Communication):
Total number of processes
addresses_filepath : str
JSON file with machine_id -> ip mapping
offset : int, optional
Offset added to the base port number of each process (default 9000)
recv_timeout : int, optional
Timeout in milliseconds for receiving messages on the ROUTER socket (default 50)
"""
super().__init__(rank, machine_id, mapping, total_procs)
@@ -68,18 +79,22 @@
self.rank = rank
self.machine_id = machine_id
self.mapping = mapping
self.offset = offset
self.recv_timeout = recv_timeout
self.uid = mapping.get_uid(rank, machine_id)
self.identity = str(self.uid).encode()
self.context = zmq.Context()
self.router = self.context.socket(zmq.ROUTER)
self.router.setsockopt(zmq.IDENTITY, self.identity)
self.router.setsockopt(zmq.RCVTIMEO, self.recv_timeout)
self.router.setsockopt(zmq.ROUTER_MANDATORY, 1)
self.router.bind(self.addr(rank, machine_id))
self.sent_disconnections = False
self.compress = compress
self.total_data = 0
self.total_meta = 0
self.peer_deque = deque()
self.peer_sockets = dict()
self.barrier = set()
def __del__(self):
"""
@@ -103,12 +118,12 @@
Encoded data
"""
if self.compress:
compressor = lzma.LZMACompressor()
output = compressor.compress(pickle.dumps(data)) + compressor.flush()
else:
output = pickle.dumps(data)
data_len = 0
if "params" in data:
data_len = len(pickle.dumps(data["params"]))
output = pickle.dumps(data)
self.total_meta += len(output) - data_len
self.total_data += data_len
return output
def decrypt(self, sender, data):
@@ -129,61 +144,37 @@
"""
sender = int(sender.decode())
if self.compress:
data = pickle.loads(lzma.decompress(data))
else:
data = pickle.loads(data)
data = pickle.loads(data)
return sender, data
def connect_neighbors(self, neighbors):
def init_connection(self, neighbor):
"""
Connects all neighbors. Sends HELLO. Waits for HELLO.
Caches any data received while waiting for HELLOs.
Initiates a socket to a given node.
Parameters
----------
neighbors : list(int)
List of neighbors
Raises
------
RuntimeError
If received BYE while waiting for HELLO
neighbor : int
neighbor to connect to
"""
logging.info("Sending connection request to neighbors")
for uid in neighbors:
logging.debug("Connecting to my neighbour: {}".format(uid))
id = str(uid).encode()
req = self.context.socket(zmq.DEALER)
req.setsockopt(zmq.IDENTITY, self.identity)
req.connect(self.addr(*self.mapping.get_machine_and_rank(uid)))
self.peer_sockets[id] = req
req.send(HELLO)
num_neighbors = len(neighbors)
while len(self.barrier) < num_neighbors:
sender, recv = self.router.recv_multipart()
if recv == HELLO:
logging.debug("Received {} from {}".format(HELLO, sender))
self.barrier.add(sender)
elif recv == BYE:
logging.debug("Received {} from {}".format(BYE, sender))
raise RuntimeError(
"A neighbour wants to disconnect before training started!"
)
else:
logging.debug(
"Received message from {} @ connect_neighbors".format(sender)
)
self.peer_deque.append(self.decrypt(sender, recv))
logging.info("Connected to all neighbors")
self.initialized = True
def receive(self):
logging.debug("Connecting to my neighbour: {}".format(neighbor))
id = str(neighbor).encode()
req = self.context.socket(zmq.DEALER)
req.setsockopt(zmq.IDENTITY, self.identity)
req.connect(self.addr(*self.mapping.get_machine_and_rank(neighbor)))
self.peer_sockets[id] = req
def destroy_connection(self, neighbor, linger=None):
id = str(neighbor).encode()
if self.already_connected(neighbor):
self.peer_sockets[id].close(linger=linger)
del self.peer_sockets[id]
def already_connected(self, neighbor):
id = str(neighbor).encode()
return id in self.peer_sockets
def receive(self, block=True):
"""
Returns ONE message received.
@@ -198,27 +189,21 @@
If received HELLO
"""
assert self.initialized == True
if len(self.peer_deque) != 0:
resp = self.peer_deque.popleft()
return resp
sender, recv = self.router.recv_multipart()
if recv == HELLO:
logging.debug("Received {} from {}".format(HELLO, sender))
raise RuntimeError(
"A neighbour wants to connect when everyone is connected!"
)
elif recv == BYE:
logging.debug("Received {} from {}".format(BYE, sender))
self.barrier.remove(sender)
return self.receive()
else:
logging.debug("Received message from {}".format(sender))
return self.decrypt(sender, recv)
while True:
try:
sender, recv = self.router.recv_multipart()
s, r = self.decrypt(sender, recv)
return s, r
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
if not block:
return None
else:
continue
else:
raise
def send(self, uid, data):
def send(self, uid, data, encrypt=True):
"""
Send a message to a process.
@@ -230,35 +215,13 @@
Message as a Python dictionary
"""
assert self.initialized == True
to_send = self.encrypt(data)
if encrypt:
to_send = self.encrypt(data)
else:
to_send = data
data_size = len(to_send)
self.total_bytes += data_size
id = str(uid).encode()
self.peer_sockets[id].send(to_send)
logging.debug("{} sent the message to {}.".format(self.uid, uid))
logging.info("Sent this round: {}".format(data_size))
def disconnect_neighbors(self):
"""
Disconnects all neighbors.
"""
assert self.initialized == True
if not self.sent_disconnections:
logging.info("Disconnecting neighbors")
for sock in self.peer_sockets.values():
sock.send(BYE)
self.sent_disconnections = True
while len(self.barrier):
sender, recv = self.router.recv_multipart()
if recv == BYE:
logging.debug("Received {} from {}".format(BYE, sender))
self.barrier.remove(sender)
else:
logging.critical(
"Received unexpected {} from {}".format(recv, sender)
)
raise RuntimeError(
"Received a message when expecting BYE from {}".format(sender)
)
logging.info("Sent message size: {}".format(data_size))
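The revised addr() scheme gives each local rank its own odd port above a configurable base, (2 * rank + 1) + offset, instead of the old rank + 20000, and the ROUTER socket now has a receive timeout so receive() can poll without blocking forever. A small sketch of the port computation (tcp_port is an illustrative helper, assuming the default offset of 9000):

# Port scheme sketch: with offset=9000, ranks 0, 1, 2 bind to 9001, 9003, 9005.
def tcp_port(rank, offset=9000):
    port = (2 * rank + 1) + offset
    assert port > 0
    return port

assert [tcp_port(r) for r in range(3)] == [9001, 9003, 9005]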
class Compression:
"""
Compression API
"""
def __init__(self):
"""
Constructor
"""
def compress(self, arr):
"""
compression function
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
raise NotImplementedError
def decompress(self, bytes):
"""
decompression function
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
raise NotImplementedError
def compress_float(self, arr):
"""
compression function for float arrays
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
raise NotImplementedError
def decompress_float(self, bytes):
"""
decompression function for compressed float arrays
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
raise NotImplementedError
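The Compression base class splits the API in two: compress/decompress handle integer index metadata, while compress_float/decompress_float handle the float parameter payload. As a minimal illustration of subclassing it, here is a hypothetical pass-through implementation (not part of the package):

import numpy as np

from decentralizepy.compression.Compression import Compression


class NoCompression(Compression):
    """Hypothetical subclass illustrating the four-method API."""

    def compress(self, arr):
        # Metadata path: store the indices as raw int32 bytes (native byte order).
        return arr.astype(np.int32).tobytes()

    def decompress(self, bytes):
        return np.frombuffer(bytes, dtype=np.int32)

    def compress_float(self, arr):
        # Float path: leave the parameters untouched.
        return arr

    def decompress_float(self, bytes):
        return bytes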
# elias implementation: taken from this stack overflow post:
# https://stackoverflow.com/questions/62843156/python-fast-compression-of-large-amount-of-numbers-with-elias-gamma
import numpy as np
from decentralizepy.compression.Compression import Compression
class Elias(Compression):
"""
Compression API
"""
def __init__(self):
"""
Constructor
"""
def compress(self, arr):
"""
compression function
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
arr.sort()
first = arr[0]
arr = np.diff(arr).astype(np.int32)
arr = arr.view(f"u{arr.itemsize}")
l = np.log2(arr).astype("u1")
L = ((l << 1) + 1).cumsum()
out = np.zeros(int(L[-1] + 128), "u1")
for i in range(l.max() + 1):
out[L - i - 1] += (arr >> i) & 1
s = np.array([out.size], dtype=np.int64)
size = np.ndarray(8, dtype="u1", buffer=s.data)
packed = np.packbits(out)
packed[-8:] = size
s = np.array([first], dtype=np.int64)
size = np.ndarray(8, dtype="u1", buffer=s.data)
packed[-16:-8] = size
return packed
def decompress(self, bytes):
"""
decompression function
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
n_arr = bytes[-8:]
n = np.ndarray(1, dtype=np.int64, buffer=n_arr.data)[0]
first = bytes[-16:-8]
first = np.ndarray(1, dtype=np.int64, buffer=first.data)[0]
b = bytes[:-16]
b = np.unpackbits(b, count=n).view(bool)
s = b.nonzero()[0]
s = (s << 1).repeat(np.diff(s, prepend=-1))
s -= np.arange(-1, len(s) - 1)
s = s.tolist() # list has faster __getitem__
ns = len(s)
def gen():
idx = 0
yield idx
while idx < ns:
idx = s[idx]
yield idx
offs = np.fromiter(gen(), int)
sz = np.diff(offs) >> 1
mx = sz.max() + 1
out_fin = np.zeros(offs.size, int)
out_fin[0] = first
out = out_fin[1:]
for i in range(mx):
out[b[offs[1:] - i - 1] & (sz >= i)] += 1 << i
out = np.cumsum(out_fin)
return out
def compress_float(self, arr):
"""
compression function for float arrays
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
return arr
def decompress_float(self, bytes):
"""
decompression function for compressed float arrays
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
return bytes
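Elias sorts the index array, delta-encodes it, and Elias-gamma packs the differences, storing the first value and the bit count in the trailing 16 bytes; decompress reverses this with a cumulative sum. It therefore expects arrays of distinct non-negative integers and returns them sorted. A hedged round-trip sketch (the sample values are arbitrary):

import numpy as np

from decentralizepy.compression.Elias import Elias

coder = Elias()
indices = np.array([3, 17, 42, 1000, 100000], dtype=np.int64)
packed = coder.compress(indices.copy())  # note: compress() sorts its argument in place
restored = coder.decompress(packed)
assert np.array_equal(restored, np.sort(indices))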
# elias implementation: taken from this stack overflow post:
# https://stackoverflow.com/questions/62843156/python-fast-compression-of-large-amount-of-numbers-with-elias-gamma
import fpzip
from decentralizepy.compression.Elias import Elias
class EliasFpzip(Elias):
"""
Compression API
"""
def __init__(self):
"""
Constructor
"""
def compress_float(self, arr):
"""
compression function for float arrays
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
return fpzip.compress(arr, precision=0, order="C")
def decompress_float(self, bytes):
"""
decompression function for compressed float arrays
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
return fpzip.decompress(bytes, order="C").squeeze()
# elias implementation: taken from this stack overflow post:
# https://stackoverflow.com/questions/62843156/python-fast-compression-of-large-amount-of-numbers-with-elias-gamma
import fpzip
from decentralizepy.compression.Elias import Elias
class EliasFpzipLossy(Elias):
"""
Compression API
"""
def __init__(self):
"""
Constructor
"""
def compress_float(self, arr):
"""
compression function for float arrays
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
return fpzip.compress(arr, precision=18, order="C")
def decompress_float(self, bytes):
"""
decompression function for compressed float arrays
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
return fpzip.decompress(bytes, order="C").squeeze()
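EliasFpzip and EliasFpzipLossy only override the float path: precision=0 asks fpzip for lossless coding, while precision=18 keeps roughly 18 bits per value and trades accuracy for size. A hedged sketch of the lossless float round trip (array shape and values are illustrative):

import numpy as np

from decentralizepy.compression.EliasFpzip import EliasFpzip

coder = EliasFpzip()
params = np.random.rand(1024).astype(np.float32)
blob = coder.compress_float(params)       # fpzip stream, lossless at precision=0
restored = coder.decompress_float(blob)
assert np.allclose(restored, params)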
import lz4.frame
import numpy as np
from decentralizepy.compression.Compression import Compression
class Lz4Wrapper(Compression):
"""
Compression API
"""
def __init__(self, compress_metadata=True, compress_data=False):
"""
Constructor
"""
self.compress_metadata = compress_metadata
self.compress_data = compress_data
def compress(self, arr):
"""
compression function
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
if self.compress_metadata:
arr.sort()
diff = np.diff(arr, prepend=0).astype(np.int32)
to_compress = diff.tobytes("C")
return lz4.frame.compress(to_compress)
return arr
def decompress(self, bytes):
"""
decompression function
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
if self.compress_metadata:
decomp = lz4.frame.decompress(bytes)
return np.cumsum(np.frombuffer(decomp, dtype=np.int32))
return bytes
def compress_float(self, arr):
"""
compression function for float arrays
Parameters
----------
arr : np.ndarray
Data to compress
Returns
-------
bytearray
encoded data as bytes
"""
if self.compress_data:
to_compress = arr.tobytes("C")
return lz4.frame.compress(to_compress)
return arr
def decompress_float(self, bytes):
"""
decompression function for compressed float arrays
Parameters
----------
bytes :bytearray
compressed data
Returns
-------
arr : np.ndarray
decompressed data as array
"""
if self.compress_data:
decomp = lz4.frame.decompress(bytes)
return np.frombuffer(decomp, dtype=np.float32)
return bytes
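Lz4Wrapper takes a different approach: sorted index metadata is delta-encoded and run through LZ4, and the float payload is optionally LZ4-compressed byte-for-byte, so both paths are lossless. A round-trip sketch with arbitrary sample values:

import numpy as np

from decentralizepy.compression.Lz4Wrapper import Lz4Wrapper

coder = Lz4Wrapper(compress_metadata=True, compress_data=True)

indices = np.array([4, 9, 200, 4096], dtype=np.int64)
restored_idx = coder.decompress(coder.compress(indices.copy()))
assert np.array_equal(restored_idx, indices)  # values match; dtype becomes int32 internally

values = np.random.rand(256).astype(np.float32)
restored_val = coder.decompress_float(coder.compress_float(values))
assert np.array_equal(restored_val, values)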
@@ -114,6 +114,8 @@ class CIFAR10(Dataset):
test_batch_size,
)
self.num_classes = NUM_CLASSES
self.partition_niid = partition_niid
self.shards = shards
self.transform = transforms.Compose(
......
@@ -230,6 +230,8 @@ class Celeba(Dataset):
self.IMAGES_DIR = utils.conditional_value(images_dir, "", None)
assert self.IMAGES_DIR != None
self.num_classes = NUM_CLASSES
if self.__training__:
self.load_trainset()
......
@@ -52,6 +52,7 @@ class Dataset:
self.test_dir = utils.conditional_value(test_dir, "", None)
self.sizes = utils.conditional_value(sizes, "", None)
self.test_batch_size = utils.conditional_value(test_batch_size, "", 64)
self.num_classes = None
if self.sizes:
if type(self.sizes) == str:
self.sizes = eval(self.sizes)
@@ -66,6 +67,20 @@
else:
self.__testing__ = False
self.label_distribution = None
def get_label_distribution(self):
# Only supported for classification
if self.label_distribution == None:
self.label_distribution = [0 for _ in range(self.num_classes)]
tr_set = self.get_trainset()
for _, ys in tr_set:
for y in ys:
y_val = y.item()
self.label_distribution[y_val] += 1
return self.label_distribution
def get_trainset(self):
"""
Function to get the training set
......
@@ -14,6 +14,7 @@ from decentralizepy.datasets.Dataset import Dataset
from decentralizepy.datasets.Partitioner import DataPartitioner
from decentralizepy.mappings.Mapping import Mapping
from decentralizepy.models.Model import Model
from decentralizepy.models.Resnet import BasicBlock, Bottleneck, conv1x1
NUM_CLASSES = 62
IMAGE_SIZE = (28, 28)
@@ -222,6 +223,8 @@ class Femnist(Dataset):
test_batch_size,
)
self.num_classes = NUM_CLASSES
if self.__training__:
self.load_trainset()
@@ -290,7 +293,10 @@
"""
if self.__training__:
return DataLoader(
Data(self.train_x, self.train_y), batch_size=batch_size, shuffle=shuffle
Data(self.train_x, self.train_y),
batch_size=batch_size,
shuffle=shuffle,
drop_last=True, # needed for resnet
)
raise RuntimeError("Training set not initialized!")
@@ -448,3 +454,139 @@ class CNN(Model):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class RNET(Model):
"""
From PyTorch:
Class for a Resnet Model for FEMNIST
Copied and modified from https://github.com/pytorch/pytorch/blob/75024e228ca441290b6a1c2e564300ad507d7af6/benchmarks/functional_autograd_benchmark/torchvision_models.py
For the license see models/Resnet.py
"""
def __init__(
self,
num_classes=NUM_CLASSES,
zero_init_residual=False,
groups=1,
width_per_group=32,
replace_stride_with_dilation=None,
norm_layer=None,
):
super(RNET, self).__init__()
block = BasicBlock
layers = [2, 2, 2, 2]
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 32
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
1, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 32, layers[0])
self.layer2 = self._make_layer(
block, 64, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 128, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 256, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
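RNET is a ResNet-18-style network (BasicBlock with layers [2, 2, 2, 2]) narrowed to 32 base planes and adapted to single-channel 28x28 FEMNIST images, with 62 output classes by default. A quick shape check on random input:

import torch

from decentralizepy.datasets.Femnist import RNET

model = RNET()
x = torch.randn(8, 1, 28, 28)   # batch of 8 single-channel FEMNIST-sized images
logits = model(x)
assert logits.shape == (8, 62)  # one logit per FEMNIST class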
import logging
import math
import os
import zipfile
import pandas as pd
import requests
import torch
from sklearn import metrics
from torch.utils.data import DataLoader
from decentralizepy.datasets.Data import Data
from decentralizepy.datasets.Dataset import Dataset
from decentralizepy.mappings import Mapping
from decentralizepy.models.Model import Model
class MovieLens(Dataset):
def __init__(
self,
rank: int,
machine_id: int,
mapping: Mapping,
train_dir="",
test_dir="",
sizes="",
test_batch_size=1,
):
super().__init__(
rank, machine_id, mapping, train_dir, test_dir, sizes, test_batch_size
)
self.n_users, self.n_items, df_train, df_test = self._load_data()
self.train_data, self.test_data = self._split_data(
df_train, df_test, self.n_procs
)
# [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
self.NUM_CLASSES = 10
self.RATING_DICT = {
0.5: 0,
1.0: 1,
1.5: 2,
2.0: 3,
2.5: 4,
3.0: 5,
3.5: 6,
4.0: 7,
4.5: 8,
5.0: 9,
}
def _load_data(self):
f_ratings = os.path.join(self.train_dir, "ml-latest-small", "ratings.csv")
names = ["user_id", "item_id", "rating", "timestamp"]
df_ratings = pd.read_csv(f_ratings, sep=",", names=names, skiprows=1).drop(
columns=["timestamp"]
)
# map item_id properly
items_count = df_ratings["item_id"].nunique()
items_ids = sorted(list(df_ratings["item_id"].unique()))
assert items_count == len(items_ids)
for i in range(0, items_count):
df_ratings.loc[df_ratings["item_id"] == items_ids[i], "item_id"] = i + 1
# split train, test - 70% : 30%
grouped_users = df_ratings.groupby(["user_id"])
users_count = len(grouped_users)
df_train = pd.DataFrame()
df_test = pd.DataFrame()
for i in range(0, users_count):
df_user = df_ratings[df_ratings["user_id"] == i + 1]
df_user_train = df_user.sample(frac=0.7)
df_user_test = pd.concat([df_user, df_user_train]).drop_duplicates(
keep=False
)
assert len(df_user_train) + len(df_user_test) == len(df_user)
df_train = pd.concat([df_train, df_user_train])
df_test = pd.concat([df_test, df_user_test])
# 610, 9724
return users_count, items_count, df_train, df_test
def _split_data(self, train_data, test_data, world_size):
# SPLITTING BY USERS: group by users and split the data accordingly
mod = self.n_users % world_size
users_count = self.n_users // world_size
if self.rank < mod:
users_count += 1
offset = users_count * self.rank
else:
offset = users_count * self.rank + mod
my_train_data = pd.DataFrame()
my_test_data = pd.DataFrame()
for i in range(offset, offset + users_count):
my_train_data = pd.concat(
[my_train_data, train_data[train_data["user_id"] == i + 1]]
)
my_test_data = pd.concat(
[my_test_data, test_data[test_data["user_id"] == i + 1]]
)
logging.info("Data split for test and train.")
return my_train_data, my_test_data
def get_trainset(self, batch_size=1, shuffle=False):
if self.__training__:
train_x = self.train_data[["user_id", "item_id"]].to_numpy()
train_y = self.train_data.rating.values.astype("float32")
return DataLoader(
Data(train_x, train_y), batch_size=batch_size, shuffle=shuffle
)
raise RuntimeError("Training set not initialized!")
def get_testset(self):
if self.__testing__:
test_x = self.test_data[["user_id", "item_id"]].to_numpy()
test_y = self.test_data.rating.values
return DataLoader(Data(test_x, test_y), batch_size=self.test_batch_size)
raise RuntimeError("Test set not initialized!")
def test(self, model, loss):
test_set = self.get_testset()
logging.debug("Test Loader instantiated.")
correct_pred = [0 for _ in range(self.NUM_CLASSES)]
total_pred = [0 for _ in range(self.NUM_CLASSES)]
total_correct = 0
total_predicted = 0
with torch.no_grad():
loss_val = 0.0
loss_predicted = 0.0
count = 0
for test_x, test_y in test_set:
output = model(test_x)
loss_val += loss(output, test_y).item()
count += 1
# threshold values to range [0.5, 5.0]
o1 = (output > 5.0).nonzero(as_tuple=False)
output[o1] = 5.0
o1 = (output < 0.5).nonzero(as_tuple=False)
output[o1] = 0.5
# round a number to the closest half integer
output = torch.round(output * 2) / 2
loss_predicted += metrics.mean_absolute_error(output, test_y)
for rating, prediction in zip(test_y.tolist(), output):
# print(rating, prediction)
logging.debug("{} predicted as {}".format(rating, prediction))
if rating == prediction:
correct_pred[self.RATING_DICT[rating]] += 1
total_correct += 1
total_pred[self.RATING_DICT[rating]] += 1
total_predicted += 1
logging.debug("Predicted on the test set")
for key, value in enumerate(correct_pred):
if total_pred[key] != 0:
accuracy = 100 * float(value) / total_pred[key]
else:
accuracy = 100.0
logging.debug("Accuracy for class {} is: {:.1f} %".format(key, accuracy))
accuracy = 100 * float(total_correct) / total_predicted
loss_val = math.sqrt(loss_val / count)
loss_predicted = loss_predicted / count
logging.info(
"RMSE loss: {:.8f} | Rounded MAE loss: {:.8f}".format(
loss_val, loss_predicted
)
)
logging.info("Overall accuracy is: {:.1f} %".format(accuracy))
return accuracy, loss_val
# todo: this class should be in 'models' package; add support for reading it from there and move it
class MatrixFactorization(Model):
"""
Class for a Matrix Factorization model for MovieLens.
"""
def __init__(self, n_users=610, n_items=9724, n_factors=20):
"""
Instantiates the Matrix Factorization model with user and item embeddings.
Parameters
----------
n_users
The number of unique users.
n_items
The number of unique items.
n_factors
The number of columns in embeddings matrix.
"""
super().__init__()
self.user_factors = torch.nn.Embedding(n_users, n_factors)
self.item_factors = torch.nn.Embedding(n_items, n_factors)
self.user_factors.weight.data.uniform_(-0.05, 0.05)
self.item_factors.weight.data.uniform_(-0.05, 0.05)
def forward(self, data):
"""
Forward pass of the model: computes the dot product of the user and item embeddings and returns the predicted ratings.
"""
users = torch.LongTensor(data[:, 0]) - 1
items = torch.LongTensor(data[:, 1]) - 1
u, it = self.user_factors(users), self.item_factors(items)
x = (u * it).sum(dim=1, keepdim=True)
return x.squeeze(1)
def download_movie_lens(dest_path):
"""
Downloads the movielens latest small dataset.
This data set consists of:
* 100836 ratings from 610 users on 9742 movies.
* Each user has rated at least 20 movies.
https://files.grouplens.org/datasets/movielens/ml-latest-small-README.html
"""
url = "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip"
req = requests.get(url, stream=True)
print("Downloading MovieLens Latest Small data...")
with open(os.path.join(dest_path, "ml-latest-small.zip"), "wb") as fd:
for chunk in req.iter_content(chunk_size=None):
fd.write(chunk)
with zipfile.ZipFile(os.path.join(dest_path, "ml-latest-small.zip"), "r") as z:
z.extractall(dest_path)
print("Downloaded MovieLens Latest Small dataset at", dest_path)
if __name__ == "__main__":
path = "/mnt/nfs/shared/leaf/data/movielens"
zip_file = os.path.join(path, "ml-latest-small.zip")
if not os.path.isfile(zip_file):
download_movie_lens(path)
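MatrixFactorization scores a (user_id, item_id) pair as the dot product of the corresponding user and item embeddings; the IDs in the ratings file are 1-based, hence the -1 shift in forward. A minimal sketch with dummy input pairs:

import numpy as np

from decentralizepy.datasets.MovieLens import MatrixFactorization

model = MatrixFactorization(n_users=610, n_items=9724, n_factors=20)
# Two dummy (user_id, item_id) pairs, 1-based as in ratings.csv.
batch = np.array([[1, 1], [610, 9724]], dtype=np.int64)
predictions = model(batch)      # dot product of user and item embeddings
assert predictions.shape == (2,)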
import json
import logging
import os
from collections import defaultdict
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from decentralizepy.datasets.Data import Data
from decentralizepy.datasets.Dataset import Dataset
from decentralizepy.datasets.Partitioner import DataPartitioner
from decentralizepy.mappings.Mapping import Mapping
from decentralizepy.models.Model import Model
VOCAB = list(
"dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#'/37;?bfjnrvzBFJNRVZ\"&*.26:\naeimquyAEIMQUY]!%)-159\r{{}}<>"
)
VOCAB_LEN = len(VOCAB)
# Creating a mapping from unique characters to indices
char2idx = {u: i for i, u in enumerate(VOCAB)}
idx2char = np.array(VOCAB)
EMBEDDING_DIM = 8
HIDDEN_DIM = 256
NUM_CLASSES = VOCAB_LEN
NUM_LAYERS = 2
SEQ_LENGTH = 80
class Shakespeare(Dataset):
"""
Class for the Shakespeare dataset
-- Based on https://gitlab.epfl.ch/sacs/efficient-federated-learning/-/blob/master/grad_guessing/data_utils.py
"""
def __read_file__(self, file_path):
"""
Read data from the given json file
Parameters
----------
file_path : str
The file path
Returns
-------
tuple
(users, num_samples, data)
"""
with open(file_path, "r") as inf:
client_data = json.load(inf)
return (
client_data["users"],
client_data["num_samples"],
client_data["user_data"],
)
def __read_dir__(self, data_dir):
"""
Function to read all the Shakespeare data files in the directory
Parameters
----------
data_dir : str
Path to the folder containing the data files
Returns
-------
3-tuple
A tuple containing list of users, number of samples per client,
and the data items per client
"""
users = []
num_samples = []
data = defaultdict(lambda: None)
files = os.listdir(data_dir)
files = [f for f in files if f.endswith(".json")]
for f in files:
file_path = os.path.join(data_dir, f)
u, n, d = self.__read_file__(file_path)
users.extend(u)
num_samples.extend(n)
data.update(d)
return users, num_samples, data
def file_per_user(self, dir, write_dir):
"""
Function to read all the Shakespeare data files and write one file per user
Parameters
----------
dir : str
Path to the folder containing the data files
write_dir : str
Path to the folder to write the files
"""
clients, num_samples, train_data = self.__read_dir__(dir)
for index, client in enumerate(clients):
my_data = dict()
my_data["users"] = [client]
my_data["num_samples"] = num_samples[index]
my_samples = {"x": train_data[client]["x"], "y": train_data[client]["y"]}
my_data["user_data"] = {client: my_samples}
with open(os.path.join(write_dir, client + ".json"), "w") as of:
json.dump(my_data, of)
print("Created File: ", client + ".json")
def load_trainset(self):
"""
Loads the training set. Partitions it if needed.
"""
logging.info("Loading training set.")
files = os.listdir(self.train_dir)
files = [f for f in files if f.endswith(".json")]
files.sort()
c_len = len(files)
# clients, num_samples, train_data = self.__read_dir__(self.train_dir)
if self.sizes == None: # Equal distribution of data among processes
e = c_len // self.n_procs
frac = e / c_len
self.sizes = [frac] * self.n_procs
self.sizes[-1] += 1.0 - frac * self.n_procs
logging.debug("Size fractions: {}".format(self.sizes))
self.uid = self.mapping.get_uid(self.rank, self.machine_id)
my_clients = DataPartitioner(files, self.sizes).use(self.uid)
my_train_data = {"x": [], "y": []}
self.clients = []
self.num_samples = []
logging.debug("Clients Length: %d", c_len)
logging.debug("My_clients_len: %d", my_clients.__len__())
for i in range(my_clients.__len__()):
cur_file = my_clients.__getitem__(i)
clients, _, train_data = self.__read_file__(
os.path.join(self.train_dir, cur_file)
)
for cur_client in clients:
self.clients.append(cur_client)
my_train_data["x"].extend(self.process(train_data[cur_client]["x"]))
my_train_data["y"].extend(self.process(train_data[cur_client]["y"]))
self.num_samples.append(len(train_data[cur_client]["y"]))
# turns the list of lists into a single list
self.train_y = np.array(my_train_data["y"], dtype=np.dtype("int64")).reshape(-1)
self.train_x = np.array(
my_train_data["x"], dtype=np.dtype("int64")
) # .reshape(-1)
logging.info("train_x.shape: %s", str(self.train_x.shape))
logging.info("train_y.shape: %s", str(self.train_y.shape))
assert self.train_x.shape[0] == self.train_y.shape[0]
assert self.train_x.shape[0] > 0
def load_testset(self):
"""
Loads the testing set.
"""
logging.info("Loading testing set.")
_, _, d = self.__read_dir__(self.test_dir)
test_x = []
test_y = []
for test_data in d.values():
test_x.extend(self.process(test_data["x"]))
test_y.extend(self.process(test_data["y"]))
self.test_y = np.array(test_y, dtype=np.dtype("int64")).reshape(-1)
self.test_x = np.array(test_x, dtype=np.dtype("int64"))
logging.info("test_x.shape: %s", str(self.test_x.shape))
logging.info("test_y.shape: %s", str(self.test_y.shape))
assert self.test_x.shape[0] == self.test_y.shape[0]
assert self.test_x.shape[0] > 0
def __init__(
self,
rank: int,
machine_id: int,
mapping: Mapping,
n_procs="",
train_dir="",
test_dir="",
sizes="",
test_batch_size=1024,
):
"""
Constructor which reads the data files, instantiates and partitions the dataset
Parameters
----------
rank : int
Rank of the current process (to get the partition).
machine_id : int
Machine ID
mapping : decentralizepy.mappings.Mapping
Mapping to convert rank, machine_id -> uid for data partitioning
It also provides the total number of global processes
train_dir : str, optional
Path to the training data files. Required to instantiate the training set
The training set is partitioned according to the number of global processes and sizes
test_dir : str, optional
Path to the testing data files. Required to instantiate the testing set
sizes : list(int), optional
A list of fractions specifying how much data to allot each process. Sum of fractions should be 1.0
By default, each process gets an equal amount.
test_batch_size : int, optional
Batch size during testing. Default value is 1024
"""
super().__init__(
rank,
machine_id,
mapping,
train_dir,
test_dir,
sizes,
test_batch_size,
)
if self.__training__:
self.load_trainset()
if self.__testing__:
self.load_testset()
def process(self, x):
output = list(
map(lambda sentences: list(map(lambda c: char2idx[c], list(sentences))), x)
)
return output
def get_client_ids(self):
"""
Function to retrieve all the clients of the current process
Returns
-------
list(str)
A list of strings of the client ids.
"""
return self.clients
def get_client_id(self, i):
"""
Function to get the client id of the ith sample
Parameters
----------
i : int
Index of the sample
Returns
-------
str
Client ID
Raises
------
IndexError
If the sample index is out of bounds
"""
lb = 0
for j in range(len(self.clients)):
if i < lb + self.num_samples[j]:
return self.clients[j]
raise IndexError("i is out of bounds!")
def get_trainset(self, batch_size=1, shuffle=False):
"""
Function to get the training set
Parameters
----------
batch_size : int, optional
Batch size for learning
Returns
-------
torch.utils.Dataset(decentralizepy.datasets.Data)
Raises
------
RuntimeError
If the training set was not initialized
"""
if self.__training__:
return DataLoader(
Data(self.train_x, self.train_y), batch_size=batch_size, shuffle=shuffle
)
raise RuntimeError("Training set not initialized!")
def get_testset(self):
"""
Function to get the test set
Returns
-------
torch.utils.Dataset(decentralizepy.datasets.Data)
Raises
------
RuntimeError
If the test set was not initialized
"""
if self.__testing__:
thirstiest = torch.arange(0, self.test_x.shape[0], 30)
return DataLoader(
Data(self.test_x[thirstiest], self.test_y[thirstiest]),
batch_size=self.test_batch_size,
)
raise RuntimeError("Test set not initialized!")
def test(self, model, loss):
"""
Function to evaluate model on the test dataset.
Parameters
----------
model : decentralizepy.models.Model
Model to evaluate
loss : torch.nn.loss
Loss function to evaluate
Returns
-------
tuple
(accuracy, loss_value)
"""
testloader = self.get_testset()
logging.debug("Test Loader instantiated.")
correct_pred = [0 for _ in range(NUM_CLASSES)]
total_pred = [0 for _ in range(NUM_CLASSES)]
total_correct = 0
total_predicted = 0
with torch.no_grad():
loss_val = 0.0
count = 0
for elems, labels in testloader:
outputs = model(elems)
loss_val += loss(outputs, labels).item()
count += 1
_, predictions = torch.max(outputs, 1)
for label, prediction in zip(labels, predictions):
logging.debug("{} predicted as {}".format(label, prediction))
if label == prediction:
correct_pred[label] += 1
total_correct += 1
total_pred[label] += 1
total_predicted += 1
logging.debug("Predicted on the test set")
for key, value in enumerate(correct_pred):
if total_pred[key] != 0:
accuracy = 100 * float(value) / total_pred[key]
else:
accuracy = 100.0
logging.debug("Accuracy for class {} is: {:.1f} %".format(key, accuracy))
accuracy = 100 * float(total_correct) / total_predicted
loss_val = loss_val / count
logging.info("Overall accuracy is: {:.1f} %".format(accuracy))
return accuracy, loss_val
class LSTM(Model):
"""
Class for an RNN (LSTM) model for the Shakespeare dataset
"""
def __init__(self):
"""
Constructor. Instantiates the LSTM model to predict the next character of a sequence of characters.
Based on the TensorFlow model found here: https://gitlab.epfl.ch/sacs/efficient-federated-learning/-/blob/master/grad_guessing/data_utils.py
"""
super().__init__()
# input_length does not exist
self.embedding = nn.Embedding(VOCAB_LEN, EMBEDDING_DIM)
self.lstm = nn.LSTM(
EMBEDDING_DIM, HIDDEN_DIM, batch_first=True, num_layers=NUM_LAYERS
)
# The activation (ReLU) is applied in the forward pass.
# Note: the reference TensorFlow implementation does not apply an activation at this step.
self.l1 = nn.Linear(HIDDEN_DIM * SEQ_LENGTH, VOCAB_LEN)
def forward(self, x):
"""
Forward pass of the model
Parameters
----------
x : torch.tensor
The input torch tensor
Returns
-------
torch.tensor
The output torch tensor
"""
# logging.info("Initial Shape: {}".format(x.shape))
x = self.embedding(x)
# logging.info("Embedding Shape: {}".format(x.shape))
x, _ = self.lstm(x)
# logging.info("LSTM Shape: {}".format(x.shape))
x = F.relu(x.reshape((-1, HIDDEN_DIM * SEQ_LENGTH)))
# logging.info("View Shape: {}".format(x.shape))
x = self.l1(x)
# logging.info("Output Shape: {}".format(x.shape))
return x
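The Shakespeare LSTM embeds each of the SEQ_LENGTH (80) input characters, runs a 2-layer LSTM, and maps the flattened hidden states to logits over the character vocabulary. A shape sketch reusing the module-level constants (the sample text is arbitrary):

import torch

from decentralizepy.datasets.Shakespeare import LSTM, SEQ_LENGTH, VOCAB_LEN, char2idx

model = LSTM()
text = "to be, or not to be".ljust(SEQ_LENGTH)           # pad with spaces to 80 characters
encoded = torch.tensor([[char2idx[c] for c in text]])    # shape (1, SEQ_LENGTH)
logits = model(encoded)
assert logits.shape == (1, VOCAB_LEN)                    # next-character logits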
import networkx as nx
import numpy as np
class Graph:
"""
This class defines the graph topology.
@@ -18,6 +22,9 @@ class Graph:
self.n_procs = n_procs
self.adj_list = [set() for i in range(self.n_procs)]
def get_all_nodes(self):
return [i for i in range(self.n_procs)]
def __insert_adj__(self, node, neighbours):
"""
Inserts `neighbours` into the adjacency list of `node`
@@ -144,3 +151,20 @@ class Graph:
"""
return self.adj_list[uid]
def centr(self):
my_adj = {x: list(adj) for x, adj in enumerate(self.adj_list)}
nxGraph = nx.Graph(my_adj)
a = nx.to_numpy_matrix(nxGraph)
self.averaging_weights = np.ones((self.n_procs, self.n_procs), dtype=float)
centrality = nx.betweenness_centrality(nxGraph)
for i in range(len(centrality)):
centrality[i] += 0.01
for i in range(self.averaging_weights.shape[0]):
s = 0
for j in range(self.averaging_weights.shape[0]):
self.averaging_weights[i, j] = 1.0 / centrality[j]
s += self.averaging_weights[i, j]
for j in range(self.averaging_weights.shape[0]):
self.averaging_weights[i, j] = self.averaging_weights[i, j] / s
return self.averaging_weights
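centr() builds a row-stochastic averaging matrix in which column j is weighted by 1 / (betweenness_centrality(j) + 0.01), so less-central nodes receive relatively more weight, and each row is normalized to sum to 1; note the weights span all node pairs, not just graph neighbors. A small standalone sketch of the same weighting on a 4-node star, using networkx and numpy directly rather than the Graph class:

import networkx as nx
import numpy as np

G = nx.star_graph(3)                                    # node 0 is the hub
centrality = nx.betweenness_centrality(G)
inv = np.array([1.0 / (centrality[j] + 0.01) for j in G.nodes])
weights = np.tile(inv, (len(G), 1))                     # identical rows, as in centr()
weights /= weights.sum(axis=1, keepdims=True)           # normalize each row to sum to 1
assert np.allclose(weights.sum(axis=1), 1.0)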
import networkx as nx
from decentralizepy.graphs.Graph import Graph
class Star(Graph):
"""
The class for generating a Star topology
Adapted from ./Regular.py
"""
def __init__(self, n_procs):
"""
Constructor. Generates a Star graph
Parameters
----------
n_procs : int
total number of nodes in the graph
"""
super().__init__(n_procs)
G = nx.star_graph(n_procs - 1)
adj = G.adjacency()
for i, l in adj:
self.adj_list[i] = set() # new set
for k in l:
self.adj_list[i].add(k)
if not nx.is_connected(G):
self.connect_graph()
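Star wires every non-hub node to node 0 only, so the hub's adjacency set contains all other nodes and every leaf sees just the hub. A quick check of the generated adjacency list:

from decentralizepy.graphs.Star import Star

g = Star(5)                       # node 0 is the hub
assert g.adj_list[0] == {1, 2, 3, 4}
assert all(g.adj_list[i] == {0} for i in range(1, 5))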
@@ -8,7 +8,7 @@ class Linear(Mapping):
"""
def __init__(self, n_machines, procs_per_machine):
def __init__(self, n_machines, procs_per_machine, global_service_machine=0):
"""
Constructor
@@ -23,6 +23,7 @@
super().__init__(n_machines * procs_per_machine)
self.n_machines = n_machines
self.procs_per_machine = procs_per_machine
self.global_service_machine = global_service_machine
def get_uid(self, rank: int, machine_id: int):
"""
@@ -41,6 +42,8 @@
the unique identifier
"""
if rank < 0:
return rank
return machine_id * self.procs_per_machine + rank
def get_machine_and_rank(self, uid: int):
@@ -58,6 +61,8 @@
a tuple of rank and machine_id
"""
if uid < 0:
return uid, self.global_service_machine
return (uid % self.procs_per_machine), (uid // self.procs_per_machine)
def get_local_procs_count(self):
......
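The Linear mapping changes let a negative uid, presumably the peer-sampler service process (the launchers above pass args.server_rank to PeerSampler), bypass the usual arithmetic: get_uid returns a negative rank unchanged, and get_machine_and_rank routes any negative uid to the configured global_service_machine. A small sketch of the resulting mapping:

from decentralizepy.mappings.Linear import Linear

mapping = Linear(n_machines=2, procs_per_machine=4, global_service_machine=0)

assert mapping.get_uid(rank=3, machine_id=1) == 7       # 1 * 4 + 3
assert mapping.get_machine_and_rank(7) == (3, 1)        # (rank, machine_id)
assert mapping.get_uid(rank=-1, machine_id=1) == -1     # service process keeps its negative rank
assert mapping.get_machine_and_rank(-1) == (-1, 0)      # routed to the global service machine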
import pickle
from pathlib import Path
import torch
from torch import nn
@@ -58,3 +62,39 @@ class Model(nn.Module):
"""
if self.accumulated_changes is not None:
self.accumulated_changes[indices] = 0.0
def dump_weights(self, directory, uid, round):
"""
Dumps the current model weights as a pickle file into the specified directory
Parameters
----------
directory : str
directory in which the weights are dumped
uid : int
uid of the node, will be used to give the weight a unique name
round : int
current round, will be used to give the weight a unique name
"""
with torch.no_grad():
tensors_to_cat = []
for _, v in self.state_dict().items():
tensors_to_cat.append(v.flatten())
flat = torch.cat(tensors_to_cat)
with open(Path(directory) / f"{round}_weight_{uid}.pk", "wb") as f:
pickle.dump(flat, f)
def get_weights(self):
"""
flattens the current weights
"""
with torch.no_grad():
tensors_to_cat = []
for _, v in self.state_dict().items():
tensors_to_cat.append(v.flatten())
flat = torch.cat(tensors_to_cat)
return flat
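dump_weights flattens the entire state_dict into one 1-D tensor and pickles it as <round>_weight_<uid>.pk, which makes it easy to compare models across rounds; get_weights returns the same flattened tensor in memory. A hedged usage sketch on a toy subclass (Tiny is hypothetical, and this assumes Model's constructor takes no arguments, as the MatrixFactorization subclass above suggests):

import pickle
import tempfile
from pathlib import Path

import torch
from torch import nn

from decentralizepy.models.Model import Model


class Tiny(Model):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)


model = Tiny()
with tempfile.TemporaryDirectory() as tmp:
    model.dump_weights(tmp, uid=3, round=0)             # writes 0_weight_3.pk
    with open(Path(tmp) / "0_weight_3.pk", "rb") as f:
        dumped = pickle.load(f)
    assert torch.equal(dumped, model.get_weights())     # same flattened parameters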
"""
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions by Kakao Brain:
Copyright 2019-2020 Kakao Brain
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from torch import nn
# Copied and modified from https://github.com/pytorch/pytorch/blob/75024e228ca441290b6a1c2e564300ad507d7af6/benchmarks/functional_autograd_benchmark/torchvision_models.py
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
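BasicBlock keeps the channel count (expansion = 1) while Bottleneck expands it 4x; a downsample module is only required when the stride or channel count changes, so the identity can still be added to the residual. A quick shape check of a standalone stride-2 BasicBlock:

import torch
from torch import nn

from decentralizepy.models.Resnet import BasicBlock, conv1x1

# Both the main path and the identity are downsampled when stride != 1.
downsample = nn.Sequential(conv1x1(32, 64, stride=2), nn.BatchNorm2d(64))
block = BasicBlock(inplanes=32, planes=64, stride=2, downsample=downsample)

x = torch.randn(2, 32, 14, 14)
out = block(x)
assert out.shape == (2, 64, 7, 7)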