Clear the Optimizer before wrapping with FSDP (#1732)
* Adding in performance registration

* clearing optimizer param groups before FSDP wrapping

* fixing comments

Co-authored-by: Brandon Cui <bcui@Brandons-MBP.hsd1.ca.comcast.net>
2 people authored and Bandish Shah committed Nov 15, 2022
1 parent 390b1d7 commit 1722c61
Showing 1 changed file with 12 additions and 3 deletions.
15 changes: 12 additions & 3 deletions composer/trainer/dist_strategy.py
@@ -151,6 +151,17 @@ def prepare_fsdp_module(model: torch.nn.Module, optimizers: Optional[Union[torch
     from torch.distributed.fsdp import (BackwardPrefetch, CPUOffload, FullyShardedDataParallel, MixedPrecision,
                                         ShardingStrategy)
 
+    if optimizers:
+        optimizers_tuple = ensure_tuple(optimizers)
+        if len(optimizers_tuple) != 1:
+            raise NotImplementedError(f'Only one optimizer is supported; found {len(optimizers_tuple)} optimizers')
+
+        # clearing optimizer param groups and state
+        # that will be recreated at the end of prepare_fsdp_module
+        optim = optimizers_tuple[0]
+        optim.param_groups.clear()
+        optim.state.clear()
+
     sharding_map = {
         'NO_SHARD': ShardingStrategy.NO_SHARD,
         'SHARD_GRAD_OP': ShardingStrategy.SHARD_GRAD_OP,
@@ -281,8 +292,6 @@ def _check_fn(module: torch.nn.Module) -> bool:
     # Rebuild optimizer now that parameters are sharded
     if optimizers:
         optimizers_tuple = ensure_tuple(optimizers)
-        if len(optimizers_tuple) != 1:
-            raise NotImplementedError(f'Only one optimizer is supported; found {len(optimizers_tuple)} optimizers')
         optim = optimizers_tuple[0]
-        optim.param_groups = []
+        optim.param_groups.clear()
         optim.add_param_group({'params': list(model.parameters())})
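
For readers who want to try the pattern outside the Composer codebase, the sketch below mirrors what prepare_fsdp_module now does: empty the optimizer's param_groups and state before wrapping, wrap the model with FullyShardedDataParallel, then add a single param group over the sharded parameters. This is a minimal illustration only, not Composer's implementation; the toy Linear model, SGD optimizer, single-rank NCCL process group, and the wrap_with_fsdp helper are assumptions for the example and require a CUDA-capable GPU.

# Minimal sketch (assumptions: single rank, one CUDA device, toy Linear model).
# The real logic lives in composer/trainer/dist_strategy.py::prepare_fsdp_module.
import os

import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel


def wrap_with_fsdp(model: torch.nn.Module, optim: torch.optim.Optimizer) -> torch.nn.Module:
    # Drop references to the unsharded parameters so the optimizer does not
    # keep stale (pre-sharding) tensors or state alive.
    optim.param_groups.clear()
    optim.state.clear()

    # FSDP replaces the module's parameters with flattened, sharded ones.
    fsdp_model = FullyShardedDataParallel(model)

    # Rebuild the optimizer against the sharded parameters; missing
    # hyperparameters (lr, momentum, ...) are filled in from the optimizer's defaults.
    optim.add_param_group({'params': list(fsdp_model.parameters())})
    return fsdp_model


if __name__ == '__main__':
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29500')
    dist.init_process_group('nccl', rank=0, world_size=1)

    model = torch.nn.Linear(8, 8).cuda()
    optim = torch.optim.SGD(model.parameters(), lr=0.1)
    model = wrap_with_fsdp(model, optim)

    loss = model(torch.randn(4, 8, device='cuda')).sum()
    loss.backward()
    optim.step()
    dist.destroy_process_group()

Note that the commit also moves the single-optimizer check ahead of the wrap, so an unsupported configuration fails before any parameters have been sharded.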
