
Commit d394b80

jeremyjordan and Borda authored

calling self.forward() -> self() (#1211)

* self.forward() -> self()
* update changelog

Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>

1 parent 2a4cd47 commit d394b80
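
Why this rename matters: calling the module instance, `self(x)`, routes through `nn.Module.__call__`, which invokes `forward` and also runs any registered hooks; calling `self.forward(x)` directly silently bypasses those hooks. A minimal PyTorch sketch of the difference (illustrative only, not code from this commit):

    import torch
    from torch import nn

    model = nn.Linear(4, 2)

    # forward hooks fire only when the module is invoked via __call__
    model.register_forward_hook(lambda mod, inp, out: print('hook fired'))

    x = torch.randn(1, 4)
    model(x)          # prints 'hook fired'
    model.forward(x)  # runs forward, but the hook is skipped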

15 files changed: +46 additions, -45 deletions


CHANGELOG.md

Lines changed: 1 addition & 0 deletions

@@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added support for `IterableDataset` in validation and testing ([#1104](https://github.com/PyTorchLightning/pytorch-lightning/pull/1104))
 - Added support for non-primitive types in `hparams` for `TensorboardLogger` ([#1130](https://github.com/PyTorchLightning/pytorch-lightning/pull/1130))
 - Added a check that stops the training when loss or weights contain `NaN` or `inf` values. ([#1097](https://github.com/PyTorchLightning/pytorch-lightning/pull/1097))
+- Updated references to self.forward() to instead use the `__call__` interface. ([#1211](https://github.com/PyTorchLightning/pytorch-lightning/pull/1211))

 ### Changed

README.md

Lines changed: 1 addition & 1 deletion

@@ -200,7 +200,7 @@ def validation_step(self, batch, batch_idx):
     x, y = batch

     # or as basic as a CNN classification
-    out = self.forward(x)
+    out = self(x)
     loss = my_loss(out, y)
     return {'loss': loss}
 ```

docs/source/child_modules.rst

Lines changed: 2 additions & 2 deletions

@@ -24,7 +24,7 @@ that change in the `Autoencoder` model are the init, forward, training, validati
         x, _ = batch

         representation = self.encoder(x)
-        x_hat = self.forward(representation)
+        x_hat = self(representation)

         loss = MSE(x, x_hat)
         return loss
@@ -38,7 +38,7 @@ that change in the `Autoencoder` model are the init, forward, training, validati
     def _shared_eval(self, batch, batch_idx, prefix):
         x, y = batch
         representation = self.encoder(x)
-        x_hat = self.forward(representation)
+        x_hat = self(representation)

         loss = F.nll_loss(logits, y)
         return {f'{prefix}_loss': loss}
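
An aside on this hunk: `_shared_eval` computes `F.nll_loss(logits, y)`, but `logits` is never defined in the snippet; for an autoencoder, the natural target is the reconstruction `x_hat`. A self-contained sketch of the intended pattern (my reading, not part of this commit; assumes `self.encoder` from the surrounding doc page and `F` as `torch.nn.functional`):

    def _shared_eval(self, batch, batch_idx, prefix):
        x, y = batch
        representation = self.encoder(x)
        # decode via __call__ so module hooks still run
        x_hat = self(representation)

        # compare the reconstruction against the input
        loss = F.mse_loss(x_hat, x)
        return {f'{prefix}_loss': loss}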

docs/source/introduction_guide.rst

Lines changed: 7 additions & 7 deletions

@@ -319,7 +319,7 @@ in the LightningModule

     def training_step(self, batch, batch_idx):
         x, y = batch
-        logits = self.forward(x)
+        logits = self(x)
         loss = F.nll_loss(logits, y)
         return {'loss': loss}
         # return loss (also works)
@@ -371,7 +371,7 @@ For clarity, we'll recall that the full LightningModule now looks like this.

     def training_step(self, batch, batch_idx):
         x, y = batch
-        logits = self.forward(x)
+        logits = self(x)
         loss = F.nll_loss(logits, y)

         # add logging
@@ -684,7 +684,7 @@ sample split in the `train_dataloader` method.
 class LitMNIST(pl.LightningModule):
     def validation_step(self, batch, batch_idx):
         x, y = batch
-        logits = self.forward(x)
+        logits = self(x)
         loss = F.nll_loss(logits, y)
         return {'val_loss': loss}

@@ -740,7 +740,7 @@ Just like the validation loop, we define exactly the same steps for testing:
 class LitMNIST(pl.LightningModule):
     def test_step(self, batch, batch_idx):
         x, y = batch
-        logits = self.forward(x)
+        logits = self(x)
         loss = F.nll_loss(logits, y)
         return {'val_loss': loss}

@@ -827,7 +827,7 @@ within it.

     def training_step(self, batch, batch_idx):
         x, y = batch
-        logits = self.forward(x)
+        logits = self(x)
         loss = F.nll_loss(logits, y)
         return loss

@@ -855,7 +855,7 @@ In this case, we've set this LightningModel to predict logits. But we could also

     def training_step(self, batch, batch_idx):
         x, y = batch
-        out, l1_feats, l2_feats, l3_feats = self.forward(x)
+        out, l1_feats, l2_feats, l3_feats = self(x)
         logits = torch.log_softmax(out, dim=1)
         ce_loss = F.nll_loss(logits, y)
         loss = perceptual_loss(l1_feats, l2_feats, l3_feats) + ce_loss
@@ -880,7 +880,7 @@ Or maybe we have a model that we use to do generation
     def training_step(self, batch, batch_idx):
         x, y = batch
         representation = self.encoder(x)
-        imgs = self.forward(representation)
+        imgs = self(representation)

         loss = perceptual_loss(imgs, x)
         return loss

docs/source/multi_gpu.rst

Lines changed: 1 addition & 1 deletion

@@ -207,7 +207,7 @@ to illustrate why this is needed, let's look at dataparallel

     def training_step(self, batch, batch_idx):
         x, y = batch
-        y_hat = self.forward(batch)
+        y_hat = self(batch)

         # on dp or ddp2 if we did softmax now it would be wrong
         # because batch is actually a piece of the full batch
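
The two comments in this hunk carry the argument: under dp or ddp2, `training_step` runs on each GPU with only a slice of the full batch, so batch-level operations computed there would be wrong. A hedged sketch of the split the docs are pointing toward (assumes the `training_step_end` hook from later revisions of the Lightning API; it may have had a different name at the time of this commit):

    def training_step(self, batch, batch_idx):
        # runs once per GPU, on a piece of the full batch
        x, y = batch
        out = self(x)
        return {'out': out, 'y': y}

    def training_step_end(self, outputs):
        # runs once on the gathered outputs from all GPUs,
        # so batch-level ops like softmax/loss are correct here
        logits = torch.log_softmax(outputs['out'], dim=1)
        loss = F.nll_loss(logits, outputs['y'])
        return {'loss': loss}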

pl_examples/basic_examples/lightning_module_template.py

Lines changed: 2 additions & 2 deletions

@@ -106,7 +106,7 @@ def training_step(self, batch, batch_idx):
         x, y = batch
         x = x.view(x.size(0), -1)

-        y_hat = self.forward(x)
+        y_hat = self(x)

         # calculate loss
         loss_val = self.loss(y, y_hat)
@@ -133,7 +133,7 @@ def validation_step(self, batch, batch_idx):
         """
         x, y = batch
         x = x.view(x.size(0), -1)
-        y_hat = self.forward(x)
+        y_hat = self(x)

         loss_val = self.loss(y, y_hat)

pl_examples/domain_templates/gan.py

Lines changed: 2 additions & 2 deletions

@@ -105,7 +105,7 @@ def training_step(self, batch, batch_idx, optimizer_idx):
         z = z.cuda(imgs.device.index)

         # generate images
-        self.generated_imgs = self.forward(z)
+        self.generated_imgs = self(z)

         # log sampled images
         # sample_imgs = self.generated_imgs[:6]
@@ -179,7 +179,7 @@ def on_epoch_end(self):
         z = z.cuda(self.last_imgs.device.index)

         # log sampled images
-        sample_imgs = self.forward(z)
+        sample_imgs = self(z)
         grid = torchvision.utils.make_grid(sample_imgs)
         self.logger.experiment.add_image(f'generated_images', grid, self.current_epoch)

pl_examples/full_examples/imagenet/imagenet_example.py

Lines changed: 2 additions & 2 deletions

@@ -42,7 +42,7 @@ def forward(self, x):

     def training_step(self, batch, batch_idx):
         images, target = batch
-        output = self.forward(images)
+        output = self(images)
         loss_val = F.cross_entropy(output, target)
         acc1, acc5 = self.__accuracy(output, target, topk=(1, 5))

@@ -65,7 +65,7 @@ def training_step(self, batch, batch_idx):

     def validation_step(self, batch, batch_idx):
         images, target = batch
-        output = self.forward(images)
+        output = self(images)
         loss_val = F.cross_entropy(output, target)
         acc1, acc5 = self.__accuracy(output, target, topk=(1, 5))

pl_examples/full_examples/semantic_segmentation/semseg.py

Lines changed: 1 addition & 1 deletion

@@ -143,7 +143,7 @@ def training_step(self, batch, batch_nb):
         img, mask = batch
         img = img.float()
         mask = mask.long()
-        out = self.forward(img)
+        out = self(img)
         loss_val = F.cross_entropy(out, mask, ignore_index=250)
         return {'loss': loss_val}

pytorch_lightning/core/__init__.py

Lines changed: 3 additions & 3 deletions

@@ -82,7 +82,7 @@ def forward(self, x):

     def training_step(self, batch, batch_idx):
         x, y = batch
-        y_hat = self.forward(x)
+        y_hat = self(x)
         return {'loss': F.cross_entropy(y_hat, y)}

     def train_dataloader(self):
@@ -159,7 +159,7 @@ def configure_optimizers(self):
 class LitModel(pl.LightningModule):
     def validation_step(self, batch, batch_idx):
         x, y = batch
-        y_hat = self.forward(x)
+        y_hat = self(x)
         return {'val_loss': F.cross_entropy(y_hat, y)}

     def validation_epoch_end(self, outputs):
@@ -178,7 +178,7 @@ def val_dataloader(self):
 class LitModel(pl.LightningModule):
     def test_step(self, batch, batch_idx):
         x, y = batch
-        y_hat = self.forward(x)
+        y_hat = self(x)
         return {'test_loss': F.cross_entropy(y_hat, y)}

     def test_epoch_end(self, outputs):
