Skip to content

Commit 26c382a

Browse files
committed
move accelerate to utils
1 parent e8e794c commit 26c382a

File tree

5 files changed

+74
-105
lines changed

5 files changed

+74
-105
lines changed

src/sagemaker/serve/builder/model_builder.py

Lines changed: 12 additions & 37 deletions
Original file line number | Diff line number | Diff line change
@@ -20,7 +20,6 @@
2020

2121
from pathlib import Path
2222

23-
from accelerate.commands.estimate import estimate_command_parser, gather_data
2423
from sagemaker import Session
2524
from sagemaker.model import Model
2625
from sagemaker.base_predictor import PredictorBase
@@ -43,7 +42,11 @@
4342
from sagemaker.serve.utils import task
4443
from sagemaker.serve.utils.exceptions import TaskNotFoundException
4544
from sagemaker.serve.utils.predictors import _get_local_mode_predictor
46-
from sagemaker.serve.utils.hardware_detector import _get_gpu_info, _get_gpu_info_fallback
45+
from sagemaker.serve.utils.hardware_detector import (
46+
_get_gpu_info,
47+
_get_gpu_info_fallback,
48+
_total_inference_model_size_mib,
49+
)
4750
from sagemaker.serve.detector.image_detector import (
4851
auto_detect_container,
4952
_detect_framework_and_version,
@@ -70,13 +73,6 @@
7073
ModelServer.DJL_SERVING,
7174
}
7275

73-
MIB_CONVERSION_FACTOR = 0.00000095367431640625
74-
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
75-
VERSION_DETECTION_ERROR = (
76-
"Please install accelerate and transformers for HuggingFace (HF) model "
77-
"size calculations e.g. pip install 'sagemaker[huggingface]'"
78-
)
79-
8076

8177
# pylint: disable=attribute-defined-outside-init, disable=E1101
8278
@dataclass
@@ -723,43 +719,22 @@ def _schema_builder_init(self, model_task: str):
723719
except ValueError:
724720
raise TaskNotFoundException(f"Schema builder for {model_task} could not be found.")
725721

726-
def _total_inference_model_size_mib(self):
727-
"""Calculates the model size from HF accelerate
728-
729-
This function gets the model size from accelerate. It also adds a
730-
padding and converts to size MiB. When performing inference, expect
731-
to add up to an additional 20% to the given model size as found by EleutherAI.
732-
"""
733-
try:
734-
dtypes = self.env_vars.get("dtypes", "float32")
735-
parser = estimate_command_parser()
736-
args = parser.parse_args([self.model, "--dtypes", dtypes])
737-
738-
output = gather_data(
739-
args
740-
) # "dtype", "Largest Layer", "Total Size Bytes", "Training using Adam"
741-
except ImportError as e:
742-
logger.warning(VERSION_DETECTION_ERROR)
743-
raise e
744-
745-
if output is None:
746-
raise ValueError(f"Could not get Model size for {self.model}")
747-
748-
total_memory_size_mib = MEMORY_BUFFER_MULTIPLIER * output[0][2] * MIB_CONVERSION_FACTOR
749-
logger.info("Total memory size MIB: %s", total_memory_size_mib)
750-
return total_memory_size_mib
751-
752722
def _can_fit_on_single_gpu(self) -> Type[bool]:
753723
"""Check if model can fit on a single GPU
754724
755725
If the size of the model is <= single gpu memory size, returns True else False
756726
"""
757727
try:
758728
single_gpu_size_mib = self._try_fetch_gpu_info()
759-
if self._total_inference_model_size_mib() <= single_gpu_size_mib:
729+
if (
730+
_total_inference_model_size_mib(self.model, self.env_vars.get("dtypes", "float32"))
731+
<= single_gpu_size_mib
732+
):
760733
logger.info(
761734
"Total inference model size MIB %s, single GPU size for instance MIB %s",
762-
self._total_inference_model_size_mib(),
735+
_total_inference_model_size_mib(
736+
self.model, self.env_vars.get("dtypes", "float32")
737+
),
763738
single_gpu_size_mib,
764739
)
765740
return True

src/sagemaker/serve/builder/schema_builder.py

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -216,12 +216,11 @@ def __repr__(self):
216216
f"input_deserializer={self.input_deserializer._deserializer}\n"
217217
f"output_deserializer={self.output_deserializer._deserializer})"
218218
)
219-
elif hasattr(self, "custom_input_translator") and hasattr(self, "custom_output_translator"):
220-
return (
221-
f"SchemaBuilder(\n"
222-
f"custom_input_translator={self.custom_input_translator}\n"
223-
f"custom_output_translator={self.custom_output_translator}\n"
224-
)
219+
return (
220+
f"SchemaBuilder(\n"
221+
f"custom_input_translator={self.custom_input_translator}\n"
222+
f"custom_output_translator={self.custom_output_translator}\n"
223+
)
225224

226225
def generate_marshalling_map(self) -> dict:
227226
"""Generate marshalling map for the schema builder"""

src/sagemaker/serve/utils/hardware_detector.py

Lines changed: 29 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -18,12 +18,18 @@
1818

1919
from botocore.exceptions import ClientError
2020

21+
from accelerate.commands.estimate import estimate_command_parser, gather_data
2122
from sagemaker import Session
23+
from sagemaker.model import Model
2224
from sagemaker import instance_types_gpu_info
2325

2426
logger = logging.getLogger(__name__)
2527

2628

29+
MIB_CONVERSION_FACTOR = 0.00000095367431640625
30+
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
31+
32+
2733
def _get_gpu_info(instance_type: str, session: Session) -> Tuple[int, int]:
2834
"""Get GPU info for the provided instance
2935
@@ -108,3 +114,26 @@ def _format_instance_type(instance_type: str) -> str:
108114

109115
ec2_instance = ".".join(split_instance)
110116
return ec2_instance
117+
118+
119+
def _total_inference_model_size_mib(model: Model, dtype: str) -> int:
120+
"""Calculates the model size from HF accelerate
121+
122+
This function gets the model size from accelerate. It also adds a
123+
padding and converts to size MiB. When performing inference, expect
124+
to add up to an additional 20% to the given model size as found by EleutherAI.
125+
"""
126+
dtypes = dtype
127+
parser = estimate_command_parser()
128+
args = parser.parse_args([model, "--dtypes", dtypes])
129+
130+
output = gather_data(
131+
args
132+
) # "dtype", "Largest Layer", "Total Size Bytes", "Training using Adam"
133+
134+
if output is None:
135+
raise ValueError(f"Could not get Model size for {model}")
136+
137+
total_memory_size_mib = MEMORY_BUFFER_MULTIPLIER * output[0][2] * MIB_CONVERSION_FACTOR
138+
logger.info("Total memory size MIB: %s", total_memory_size_mib)
139+
return total_memory_size_mib

tests/unit/sagemaker/serve/builder/test_model_builder.py

Lines changed: 4 additions & 62 deletions
Original file line number | Diff line number | Diff line change
@@ -53,9 +53,6 @@
5353
ModelServer.DJL_SERVING,
5454
}
5555

56-
MIB_CONVERSION_FACTOR = 0.00000095367431640625
57-
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
58-
5956
mock_session = MagicMock()
6057

6158

@@ -1205,7 +1202,7 @@ def test_build_for_transformers_happy_case(
12051202

12061203
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers")
12071204
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._try_fetch_gpu_info")
1208-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1205+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12091206
@patch("sagemaker.image_uris.retrieve")
12101207
@patch("sagemaker.djl_inference.model.urllib")
12111208
@patch("sagemaker.djl_inference.model.json")
@@ -1248,7 +1245,7 @@ def test_build_for_transformers_happy_case_with_values(
12481245

12491246
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_djl", Mock())
12501247
@patch("sagemaker.serve.builder.model_builder._get_gpu_info")
1251-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1248+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12521249
@patch("sagemaker.image_uris.retrieve")
12531250
@patch("sagemaker.djl_inference.model.urllib")
12541251
@patch("sagemaker.djl_inference.model.json")
@@ -1293,7 +1290,7 @@ def test_build_for_transformers_happy_case_with_valid_gpu_info(
12931290
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
12941291
@patch("sagemaker.serve.builder.model_builder._get_gpu_info")
12951292
@patch("sagemaker.serve.builder.model_builder._get_gpu_info_fallback")
1296-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1293+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
12971294
@patch("sagemaker.image_uris.retrieve")
12981295
@patch("sagemaker.djl_inference.model.urllib")
12991296
@patch("sagemaker.djl_inference.model.json")
@@ -1342,61 +1339,6 @@ def test_build_for_transformers_happy_case_with_valid_gpu_fallback(
13421339
)
13431340
self.assertEqual(model_builder._can_fit_on_single_gpu(), True)
13441341

1345-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
1346-
@patch("sagemaker.serve.builder.model_builder.estimate_command_parser")
1347-
@patch("sagemaker.serve.builder.model_builder.gather_data")
1348-
@patch("sagemaker.image_uris.retrieve")
1349-
@patch("sagemaker.djl_inference.model.urllib")
1350-
@patch("sagemaker.djl_inference.model.json")
1351-
@patch("sagemaker.huggingface.llm_utils.urllib")
1352-
@patch("sagemaker.huggingface.llm_utils.json")
1353-
@patch("sagemaker.model_uris.retrieve")
1354-
@patch("sagemaker.serve.builder.model_builder._ServeSettings")
1355-
def test_build_for_transformers_happy_case_hugging_face_responses(
1356-
self,
1357-
mock_serveSettings,
1358-
mock_model_uris_retrieve,
1359-
mock_llm_utils_json,
1360-
mock_llm_utils_urllib,
1361-
mock_model_json,
1362-
mock_model_urllib,
1363-
mock_image_uris_retrieve,
1364-
mock_gather_data,
1365-
mock_parser,
1366-
):
1367-
mock_setting_object = mock_serveSettings.return_value
1368-
mock_setting_object.role_arn = mock_role_arn
1369-
mock_setting_object.s3_model_data_url = mock_s3_model_data_url
1370-
1371-
mock_model_uris_retrieve.side_effect = KeyError
1372-
mock_llm_utils_json.load.return_value = {"pipeline_tag": "text-classification"}
1373-
mock_llm_utils_urllib.request.Request.side_effect = Mock()
1374-
1375-
mock_model_json.load.return_value = {"some": "config"}
1376-
mock_model_urllib.request.Request.side_effect = Mock()
1377-
mock_image_uris_retrieve.return_value = "https://some-image-uri"
1378-
1379-
mock_parser.return_value = Mock()
1380-
mock_gather_data.return_value = [[1, 1, 1, 1]]
1381-
product = MIB_CONVERSION_FACTOR * 1 * MEMORY_BUFFER_MULTIPLIER
1382-
1383-
model_builder = ModelBuilder(
1384-
model="stable-diffusion",
1385-
sagemaker_session=mock_session,
1386-
instance_type=mock_instance_type,
1387-
)
1388-
self.assertEqual(model_builder._total_inference_model_size_mib(), product)
1389-
1390-
mock_parser.return_value = Mock()
1391-
mock_gather_data.return_value = None
1392-
model_builder = ModelBuilder(
1393-
model="stable-diffusion",
1394-
sagemaker_session=mock_session,
1395-
instance_type=mock_instance_type,
1396-
)
1397-
with self.assertRaises(ValueError) as _:
1398-
model_builder._total_inference_model_size_mib()
1399-
14001342
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_djl")
14011343
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._can_fit_on_single_gpu")
14021344
@patch("sagemaker.image_uris.retrieve")
@@ -1556,7 +1498,7 @@ def test_try_fetch_gpu_info_throws(
15561498
self.assertEqual(model_builder._can_fit_on_single_gpu(), False)
15571499

15581500
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._build_for_transformers", Mock())
1559-
@patch("sagemaker.serve.builder.model_builder.ModelBuilder._total_inference_model_size_mib")
1501+
@patch("sagemaker.serve.builder.model_builder._total_inference_model_size_mib")
15601502
@patch("sagemaker.image_uris.retrieve")
15611503
@patch("sagemaker.djl_inference.model.urllib")
15621504
@patch("sagemaker.djl_inference.model.json")

tests/unit/sagemaker/serve/utils/test_hardware_detector.py

Lines changed: 24 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -13,6 +13,7 @@
1313
from __future__ import absolute_import
1414

1515
from botocore.exceptions import ClientError
16+
from unittest.mock import patch, Mock
1617
import pytest
1718

1819
from sagemaker.serve.utils import hardware_detector
@@ -21,6 +22,8 @@
2122
VALID_INSTANCE_TYPE = "ml.g5.48xlarge"
2223
INVALID_INSTANCE_TYPE = "fl.c5.57xxlarge"
2324
EXPECTED_INSTANCE_GPU_INFO = (8, 196608)
25+
MIB_CONVERSION_FACTOR = 0.00000095367431640625
26+
MEMORY_BUFFER_MULTIPLIER = 1.2 # 20% buffer
2427

2528

2629
def test_get_gpu_info_success(sagemaker_session, boto_session):
@@ -96,3 +99,24 @@ def test_format_instance_type_without_ml_success():
9699
formatted_instance_type = hardware_detector._format_instance_type("g5.48xlarge")
97100

98101
assert formatted_instance_type == "g5.48xlarge"
102+
103+
104+
@patch("sagemaker.serve.utils.hardware_detector.estimate_command_parser")
105+
@patch("sagemaker.serve.utils.hardware_detector.gather_data")
106+
def test_total_inference_model_size_mib(
107+
mock_gather_data,
108+
mock_parser,
109+
):
110+
mock_parser.return_value = Mock()
111+
mock_gather_data.return_value = [[1, 1, 1, 1]]
112+
product = MIB_CONVERSION_FACTOR * 1 * MEMORY_BUFFER_MULTIPLIER
113+
114+
assert (
115+
hardware_detector._total_inference_model_size_mib("stable-diffusion", "float32") == product
116+
)
117+
118+
mock_parser.return_value = Mock()
119+
mock_gather_data.return_value = None
120+
121+
with pytest.raises(ValueError):
122+
hardware_detector._total_inference_model_size_mib("stable-diffusion", "float32")

0 commit comments

Comments (0)