# -*- coding: utf-8 -*-
"""
Multi-GPU Examples
==================
Data parallelism splits a mini-batch of samples into several smaller
mini-batches and runs the computation for each smaller mini-batch in
parallel.

Data parallelism is implemented using ``torch.nn.DataParallel``.
You can wrap a ``Module`` in ``DataParallel`` and it will be
parallelized over multiple GPUs in the batch dimension.

DataParallel
-------------
"""
import torch
import torch.nn as nn


class DataParallelModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.block1 = nn.Linear(10, 20)

        # wrap block2 in DataParallel
        self.block2 = nn.Linear(20, 20)
        self.block2 = nn.DataParallel(self.block2)

        self.block3 = nn.Linear(20, 20)

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        return x
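
########################################################################
# A minimal usage sketch (the batch size of 64 and the ``cuda:0`` device
# are illustrative assumptions, so the call is guarded on GPU
# availability):

if torch.cuda.is_available():
    dp_model = DataParallelModel().to("cuda:0")
    batch = torch.randn(64, 10, device="cuda:0")  # 64-sample mini-batch
    out = dp_model(batch)  # block2 is split over all visible GPUs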
########################################################################
# The code does not need to be changed to run in CPU mode.
#
# The documentation for DataParallel can be found
# `here <https://pytorch.org/docs/nn.html#dataparallel>`_.
#
# **Primitives on which DataParallel is implemented:**
#
# In general, PyTorch's ``nn.parallel`` primitives can be used
# independently. We have implemented simple MPI-like primitives:
#
# - replicate: replicate a Module on multiple devices
# - scatter: distribute the input in the first-dimension
# - gather: gather and concatenate the input in the first-dimension
# - parallel\_apply: apply a set of already-distributed inputs to a set of
# already-distributed models.
#
# For better clarity, here is a function ``data_parallel`` composed
# from these primitives:
def data_parallel(module, input, device_ids, output_device=None):
    if not device_ids:
        return module(input)

    if output_device is None:
        output_device = device_ids[0]

    # replicate the module onto each device, split the input along dim 0,
    # run every replica on its own chunk, then concatenate the results
    # back onto output_device
    replicas = nn.parallel.replicate(module, device_ids)
    inputs = nn.parallel.scatter(input, device_ids)
    replicas = replicas[:len(inputs)]
    outputs = nn.parallel.parallel_apply(replicas, inputs)
    return nn.parallel.gather(outputs, output_device)
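
########################################################################
# For example, ``data_parallel`` could be driven like this (the layer,
# device ids, and shapes are illustrative assumptions; the call is
# guarded on having at least two GPUs):

if torch.cuda.device_count() >= 2:
    layer = nn.Linear(10, 20).to("cuda:0")
    batch = torch.randn(64, 10, device="cuda:0")
    out = data_parallel(layer, batch, device_ids=[0, 1])  # gathered on GPU 0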
########################################################################
# Part of the model on CPU and part on the GPU
# --------------------------------------------
#
# Let’s look at a small example of implementing a network where part of
# it is on the CPU and part on the GPU.
device = torch.device("cuda:0")


class DistributedModel(nn.Module):

    def __init__(self):
        # note: nn.Module.__init__ takes no submodule keyword arguments,
        # so the submodules are assigned as attributes instead
        super().__init__()
        self.embedding = nn.Embedding(1000, 10)   # stays on the CPU
        self.rnn = nn.Linear(10, 10).to(device)   # lives on the GPU

    def forward(self, x):
        # Compute embedding on CPU
        x = self.embedding(x)

        # Transfer to GPU
        x = x.to(device)

        # Compute RNN on GPU
        x = self.rnn(x)
        return x
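
########################################################################
# A guarded usage sketch (the batch of eight token indices is an
# illustrative assumption):

if torch.cuda.is_available():
    dist_model = DistributedModel()
    tokens = torch.randint(0, 1000, (8,))  # LongTensor of indices, on CPU
    out = dist_model(tokens)               # result lives on the GPU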
########################################################################
#
# This was a small introduction to PyTorch for former Torch users.
# There’s a lot more to learn.
#
# Look at our more comprehensive introductory tutorial which introduces
# the ``optim`` package, data loaders etc.: :doc:`/beginner/deep_learning_60min_blitz`.
#
# Also look at
#
# - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
# - `Train a state-of-the-art ResNet network on imagenet`_
# - `Train a face generator using Generative Adversarial Networks`_
# - `Train a word-level language model using Recurrent LSTM networks`_
# - `More examples`_
# - `More tutorials`_
# - `Discuss PyTorch on the Forums`_
# - `Chat with other users on Slack`_
#
# .. _`Deep Learning with PyTorch: a 60-minute blitz`: https://github.com/pytorch/tutorials/blob/master/Deep%20Learning%20with%20PyTorch.ipynb
# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet
# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan
# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model
# .. _More examples: https://github.com/pytorch/examples
# .. _More tutorials: https://github.com/pytorch/tutorials
# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/
# .. _Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/