take care of the backward pass within the trainer classes for the diffusion prior and decoder, in preparation for handling gradient accumulation as well (also, unsure whether loss.backward() should be called inside the autocast block)

This commit is contained in:
Phil Wang
2022-05-14 15:49:24 -07:00
parent ff3474f05c
commit b494ed81d4
3 changed files with 7 additions and 5 deletions

View File

@@ -10,7 +10,7 @@ setup(
'dream = dalle2_pytorch.cli:dream'
],
},
version = '0.2.23',
version = '0.2.24',
license='MIT',
description = 'DALL-E 2',
author = 'Phil Wang',