diff --git a/nodes.py b/nodes.py
index 04bff4a..be1c23b 100644
--- a/nodes.py
+++ b/nodes.py
@@ -9,7 +9,7 @@ from unittest.mock import patch
 from comfy.ldm.flux.layers import timestep_embedding, apply_mod
-from comfy.ldm.lightricks.model import precompute_freqs_cis
+from comfy.ldm.lightricks.embeddings_connector import Embeddings1DConnector
 from comfy.ldm.lightricks.symmetric_patchifier import latent_to_pixel_coords
 from comfy.ldm.wan.model import sinusoidal_embedding_1d
@@ -628,7 +628,7 @@ def teacache_ltxvmodel_forward(
     if attention_mask is not None and not torch.is_floating_point(attention_mask):
         attention_mask = (attention_mask - 1).to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])) * torch.finfo(x.dtype).max
-    pe = precompute_freqs_cis(fractional_coords, dim=self.inner_dim, out_dtype=x.dtype)
+    pe = Embeddings1DConnector.precompute_freqs_cis(fractional_coords, dim=self.inner_dim, out_dtype=x.dtype)
     batch_size = x.shape[0]
     timestep, embedded_timestep = self.adaln_single(
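
The diff tracks an upstream ComfyUI refactor: `precompute_freqs_cis` is no longer a free function in `comfy.ldm.lightricks.model` but a static method on `Embeddings1DConnector`, so the call site is updated accordingly. If the node also needs to keep working against older ComfyUI builds, a try/except import shim can resolve the function once and leave call sites untouched. A minimal sketch, assuming the function lives in exactly one of the two locations depending on the installed ComfyUI version (the shim itself is not part of this diff):

```python
# Compatibility sketch (an assumption, not part of the diff above):
# newer ComfyUI exposes precompute_freqs_cis as a staticmethod on
# Embeddings1DConnector; older builds export it as a module-level
# function from comfy.ldm.lightricks.model.
try:
    from comfy.ldm.lightricks.embeddings_connector import Embeddings1DConnector
    precompute_freqs_cis = Embeddings1DConnector.precompute_freqs_cis
except ImportError:
    from comfy.ldm.lightricks.model import precompute_freqs_cis

# With the shim in place, the call site is identical on either version:
# pe = precompute_freqs_cis(fractional_coords, dim=self.inner_dim, out_dtype=x.dtype)
```

Resolving the import once at module load keeps the hot path free of per-call version checks; the diff as written instead pins the code to the new location only.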