Skip to content

Commit

Permalink
better comments
Browse files Browse the repository at this point in the history
  • Loading branch information
bogunowicz@arrival.com committed Jul 2, 2024
1 parent 0c36847 commit 03a3939
Showing 1 changed file with 13 additions and 6 deletions.
19 changes: 13 additions & 6 deletions src/sparseml/transformers/sparsification/sparse_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,22 +101,29 @@ def skip(*args, **kwargs):
)

# instantiate compressor from model config
compressor = ModelCompressor.from_pretrained(pretrained_model_name_or_path, **kwargs)
compressor = ModelCompressor.from_pretrained(
pretrained_model_name_or_path, **kwargs
)

# temporarily set the log level to error, to ignore printing out long missing
# and unexpected key error messages (these are EXPECTED for quantized models)
logger = logging.getLogger("transformers.modeling_utils")
restore_log_level = logger.getEffectiveLevel()
logger.setLevel(level=logging.ERROR)


if kwargs.get('trust_remote_code'):

if kwargs.get("trust_remote_code"):
# By artificially aliasing
# class name SparseAutoModelForCausalLM to
# AutoModelForCausalLM we can "trick" the
# `from_pretrained` method into properly
# resolving the logic when
# (has_remote_code and trust_remote_code) == True
cls.__name__ = AutoModelForCausalLM.__name__

model = super(AutoModelForCausalLM, cls).from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)

if model.dtype != model.config.torch_dtype:
_LOGGER.warning(
f"The dtype of the loaded model: {model.dtype} is different "
Expand Down

0 comments on commit 03a3939

Please sign in to comment.