 import torch
 import torch.nn as nn
-from torch import Tensor
+from torch import Tensor, is_grad_enabled
 from functorch import make_fx
 from torch.fx import immutable_collections
 import torch.utils._pytree as pytree
 import torch.utils.dlpack
 from torch.nn.utils import _stateless
 from functorch._C import CompileCache
 from .decompositions import register_decomposition
-from .partitioners import default_partition
+from .partitioners import default_partition, _get_saved_values, _extract_fwd_bwd_modules
 from .named_members_polyfill import _named_parameters, _named_buffers
 from typing import Callable, List, Dict, Any, Tuple, Optional
 from functools import wraps
@@ -54,7 +54,7 @@ def _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
 
 def create_joint_forward_backward(fn):
     def joint_forward_backward(
-        primals: List[Any], tangents: List[Any]
+        primals: List[Any], cotangents: List[Any]
     ) -> Tuple[List[Any], List[Any]]:
         # Call the forward pass
         outs = fn(*primals)
@@ -68,21 +68,21 @@ def joint_forward_backward(
                 grad_primals.append(p)
 
         # Get the outputs that need gradients
-        assert len(tangents) == len(outs)
+        assert len(cotangents) == len(outs)
         needed_outs = []
-        needed_tangents = []
-        for out, tangent in zip(outs, tangents):
+        needed_cotangents = []
+        for out, cotangent in zip(outs, cotangents):
             if isinstance(out, Tensor) and out.requires_grad:
                 needed_outs.append(out)
-                needed_tangents.append(tangent)
+                needed_cotangents.append(cotangent)
         backward_out = []
         # Call the backwards pass
         if grad_primals:
             backward_out = torch.autograd.grad(
                 needed_outs,
                 grad_primals,
-                grad_outputs=needed_tangents,
-                allow_unused=True,
+                grad_outputs=needed_cotangents,
+                allow_unused=True
             )
         backward_out_iter = iter(backward_out)
         return outs, [
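
The joint function traced here runs the user forward and then pulls the incoming cotangents back through torch.autograd.grad, so one traced graph contains both passes; the rename from tangents to cotangents reflects that these are the vectors of a vector-Jacobian product, not forward-mode tangents. A minimal standalone sketch of the same pattern (f, x, y and the shapes are illustrative, not part of this patch):

import torch

def f(x, y):
    return (x * y).sin()

def joint(primals, cotangents):
    # Forward pass
    outs = f(*primals)
    # Backward pass: vector-Jacobian product with the supplied cotangents
    grads = torch.autograd.grad(
        [outs], primals, grad_outputs=cotangents, allow_unused=True
    )
    return outs, grads

x = torch.randn(3, requires_grad=True)
y = torch.randn(3, requires_grad=True)
outs, grads = joint([x, y], [torch.ones(3)])  # grads lines up with (x, y)
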
@@ -138,14 +138,18 @@ def create_aot_autograd_function(
     joint_forward_backward = create_joint_forward_backward(flat_fn)
 
     compiled_fw = None
-    compiled_bw = None
+    fw_module = None
+    bw_modules = []
     num_outs = None
+    saved_value_names = None
+    aot_decompositions = {**aot_autograd_decompositions, **decompositions}
 
     class CompiledFunction(torch.autograd.Function):
         @staticmethod
         @disable_torchdynamo
         def forward(ctx, *flat_tensor_args):
-            nonlocal compiled_fw, compiled_bw, num_outs
+            ctx.set_materialize_grads(False)
+            nonlocal compiled_fw, num_outs, fw_module, saved_value_names
             if compiled_fw is None:
                 with torch.set_grad_enabled(grad_state):
                     out = flat_fn(*flat_tensor_args)
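
forward() now calls ctx.set_materialize_grads(False). With grad materialization disabled, autograd hands backward() a None for every output that did not receive a gradient instead of materializing a zero-filled tensor, which is why the new backward() below has to tolerate None entries in flat_grad_outs. A small self-contained illustration of this PyTorch behavior (the Function and shapes are made up for the example):

import torch

class TwoOutputs(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.set_materialize_grads(False)
        return x * 2, x * 3  # only the first output will be used downstream

    @staticmethod
    def backward(ctx, g0, g1):
        # With materialize_grads disabled, the unused output's grad is None
        # rather than a tensor of zeros.
        assert g1 is None
        return g0 * 2

x = torch.randn(3, requires_grad=True)
a, _ = TwoOutputs.apply(x)
a.sum().backward()
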
@@ -159,34 +163,81 @@ def forward(ctx, *flat_tensor_args):
                         num_outs = 1
 
                 joint_inputs = (flat_tensor_args, out)
-                aot_decompositions = {**aot_autograd_decompositions, **decompositions}
+                # Needed because autograd.Function disables grad in forward
                 with torch.set_grad_enabled(grad_state):
                     fx_g = make_fx(joint_forward_backward, aot_decompositions)(
                         *joint_inputs
                     )
-                fw_module, bw_module = partition_fn(fx_g, joint_inputs)
-                # print(fw_module.code, bw_module.code)
-
+                # The forward and backward graphs are created from the input fn.
+                # However, the backward also has to take grad_out for the saved intermediates.
+                fw_module, bw_module, saved_value_nodes = partition_fn(fx_g, joint_inputs)
+                saved_value_names = [node.name for node in saved_value_nodes]
                 compiled_fw = fw_compiler(fw_module, flat_tensor_args)
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
-
-                bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]
-                compiled_bw = bw_compiler(bw_module, bw_args)
             else:
                 fw_outs = normalize_as_list(compiled_fw(*flat_tensor_args))
-            ctx.save_for_backward(*fw_outs[num_outs:])
-            return tuple(fw_outs[0:num_outs])
+
+            ctx.num_intermediate = len(fw_outs[num_outs:])
+            ctx.num_inputs = len(flat_tensor_args)
+            to_be_saved = fw_outs[num_outs:] + list(flat_tensor_args) + fw_outs[0:num_outs]
+            ctx.save_for_backward(*to_be_saved)
+            return tuple(fw_outs)
 
         @staticmethod
         @disable_torchdynamo
-        def backward(ctx, *flat_args):
-            contiguous_args = [t.contiguous() for t in flat_args]
-            # contiguous_args = [t for t in flat_args]
-            out = normalize_as_list(compiled_bw(*ctx.saved_tensors, *contiguous_args))
-            return tuple(out)
+        def backward(ctx, *flat_grad_outs):
+            nonlocal fw_module, bw_modules, saved_value_names
+            intermediates = ctx.saved_tensors[:ctx.num_intermediate]
+            inputs = ctx.saved_tensors[ctx.num_intermediate:ctx.num_intermediate + ctx.num_inputs]
+            is_grad_enabled = torch.is_grad_enabled()
+
+            if not is_grad_enabled:
+                input_flat_grad_outs = []
+                for grad in flat_grad_outs:
+                    if grad is not None:
+                        input_flat_grad_outs.append(grad)
+                with torch.set_grad_enabled(grad_state):
+                    fx_g_b = make_fx(joint_forward_backward, aot_decompositions)(inputs, input_flat_grad_outs)
+                saved_value_nodes = _get_saved_values(fx_g_b, saved_value_names)
+                assert len(saved_value_nodes) <= len(saved_value_names)
+                fw_module_b, bw_module_b, saved_values_new = _extract_fwd_bwd_modules(fx_g_b, saved_value_nodes)
+                if len(saved_values_new) != len(saved_value_names):
+                    new_intermediates = []
+                    # Forward saves more intermediates than needed
+                    assert len(saved_values_new) < len(saved_value_names)
+                    j = 0
+                    for node in saved_values_new:
+                        while node.name != saved_value_names[j]:
+                            j += 1
+                        new_intermediates.append(intermediates[j])
+                        j += 1
+                    intermediates = new_intermediates
+            else:
+                input_flat_grad_outs = flat_grad_outs
+                j_b = create_joint_forward_backward(fw_module)
+                with torch.set_grad_enabled(grad_state):
+                    fx_g_b = make_fx(j_b, aot_decompositions)(inputs, input_flat_grad_outs)
+                fw_module_b, bw_module_b, _ = partition_fn(fx_g_b, (inputs, input_flat_grad_outs))
 
-    return CompiledFunction
 
+            bw_module_fn = None
+            for elem in bw_modules:
+                if elem.code == bw_module_b.code:
+                    bw_module_fn = elem
+                    break
+            if bw_module_fn is None:
+                bw_modules.append(bw_module_b)
+                bw_module_fn = bw_module_b
+
+            f = aot_function(bw_module_fn, bw_compiler, bw_compiler, partition_fn, aot_decompositions)
+
+            out = f(*intermediates, *input_flat_grad_outs)
+            return tuple(normalize_as_list(out))
+
+    def return_fn(*args, **kwargs):
+        out = CompiledFunction.apply(*args, **kwargs)
+        return out[0:num_outs]
+    return return_fn
 
 class _CompileCache(CompileCache):
     pass
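
Two conventions introduced above are worth spelling out. First, forward() now saves intermediates, then the original inputs, then the user-visible outputs, and backward() recovers them by slicing ctx.saved_tensors with ctx.num_intermediate and ctx.num_inputs. Second, because CompiledFunction.apply() now returns every forward output (real outputs plus saved intermediates), the factory returns the return_fn wrapper, which trims the result to out[0:num_outs]; that is also why the call sites in the later hunks drop the trailing .apply. A small sketch of the saved-tensor layout (the counts are illustrative, not part of the patch):

# Layout built by forward():  [ intermediates | inputs | outputs ]
num_intermediate = 4   # stands in for len(fw_outs[num_outs:])
num_inputs = 2         # stands in for len(flat_tensor_args)
saved = list(range(4 + 2 + 3))  # stand-in for ctx.saved_tensors

intermediates = saved[:num_intermediate]
inputs = saved[num_intermediate:num_intermediate + num_inputs]
outputs = saved[num_intermediate + num_inputs:]
assert (intermediates, inputs, outputs) == ([0, 1, 2, 3], [4, 5], [6, 7, 8])
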
@@ -275,7 +326,7 @@ def rearrange(tensor_args, static_args, static_argnums):
     return args
 
 
-KNOWN_TYPES = [torch.Tensor, int, str, float, bool]
+KNOWN_TYPES = [torch.Tensor, int, str, float, bool, None]
 
 
 def aot_function(
@@ -411,7 +462,9 @@ def returned_function(*args, **kwargs):
             hasher_type,
             *flat_args_for_cache,
         )
-
+        # print("fn_id: ", fn_id)
+        # print("size: ", compile_cache.size())
+        # print("num_tensor_args: ", num_tensor_args)
         # Compile the function and save it in the cache
         if cached_res is None:
             # Save the args_spec for flat_tensor_args to unflatten while tracing
@@ -436,7 +489,7 @@ def flat_fn(*flat_tensor_args):
                 for i in flat_out:
                     is_known_type = False
                     for j in KNOWN_TYPES:
-                        if isinstance(i, j):
+                        if (j is None and i is None) or (j is not None and isinstance(i, j)):
                             is_known_type = True
                             break
                     if not is_known_type:
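
With None added to KNOWN_TYPES (so that, for example, None gradients can pass the output check when the backward module is itself compiled through aot_function), a bare isinstance(i, j) would raise TypeError when j is None, so the membership test needs a separate branch for the None sentinel. A tiny sketch of the check, assuming the intent is to accept None values in addition to the listed types:

import torch

KNOWN_TYPES = [torch.Tensor, int, str, float, bool, None]

def is_known(value):
    for t in KNOWN_TYPES:
        # None here is a sentinel meaning "None values are allowed",
        # not a type, so it cannot be passed to isinstance().
        if (t is None and value is None) or (t is not None and isinstance(value, t)):
            return True
    return False

assert is_known(None) and is_known(3.0) and not is_known(object())
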
@@ -458,7 +511,7 @@ def flat_fn(*flat_tensor_args):
                 partition_fn,
                 decompositions,
                 grad_state=torch.is_grad_enabled(),
-            ).apply
+            )
             cached_res = (compiled_fn, out_spec)
 
             # Save the compiled_fn in the cache
@@ -598,7 +651,7 @@ def aot_function_simplified(
         partition_fn,
         decompositions,
         grad_state=torch.is_grad_enabled(),
-    ).apply
+    )
 
     return compiled_fn
 
@@ -620,4 +673,4 @@ def forward(self, *args, **kwargs):
 
 
 compiled_function = aot_function
-compiled_module = aot_module
\ No newline at end of file
+compiled_module = aot_module
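
For reference, create_aot_autograd_function now returns a plain callable (return_fn) instead of CompiledFunction.apply, but the public aot_function entry point (aliased as compiled_function) is used the same way as before: pass a function plus forward and backward compilers and call the result. A minimal usage sketch with a pass-through compiler that just prints the traced graph and returns it unchanged (print_compile and f are illustrative, not part of this patch; the import path follows the functorch.compile API):

import torch
from functorch.compile import aot_function

def print_compile(fx_module, example_inputs):
    # A no-op "compiler": inspect the traced fx graph, then run it as-is.
    print(fx_module.code)
    return fx_module

def f(x):
    return x.sin().cos()

compiled_f = aot_function(f, fw_compiler=print_compile, bw_compiler=print_compile)
x = torch.randn(4, requires_grad=True)
compiled_f(x).sum().backward()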