diff --git a/examples/openvino/audio-classification/requirements.txt b/examples/openvino/audio-classification/requirements.txt
index 60c66d8091..df77b9298b 100644
--- a/examples/openvino/audio-classification/requirements.txt
+++ b/examples/openvino/audio-classification/requirements.txt
@@ -1,4 +1,5 @@
 datasets>=1.14.0
 evaluate
 librosa
-torchaudio
\ No newline at end of file
+torchaudio
+accelerate
diff --git a/examples/openvino/audio-classification/run_audio_classification.py b/examples/openvino/audio-classification/run_audio_classification.py
index b8df86a575..30b95c1739 100644
--- a/examples/openvino/audio-classification/run_audio_classification.py
+++ b/examples/openvino/audio-classification/run_audio_classification.py
@@ -35,7 +35,7 @@
 from transformers.utils import check_min_version, send_example_telemetry
 from transformers.utils.versions import require_version
 
-from optimum.intel.openvino import OVConfig, OVTrainer, OVTrainingArguments
+from optimum.intel import OVConfig, OVTrainer, OVTrainingArguments
 
 
 logger = logging.getLogger(__name__)
diff --git a/examples/openvino/image-classification/requirements.txt b/examples/openvino/image-classification/requirements.txt
index c52a5f399b..a55c46ca4c 100644
--- a/examples/openvino/image-classification/requirements.txt
+++ b/examples/openvino/image-classification/requirements.txt
@@ -2,3 +2,4 @@ datasets >= 1.8.0
 torch >= 1.9.0
 torchvision>=0.6.0
 evaluate
+accelerate
diff --git a/examples/openvino/image-classification/run_image_classification.py b/examples/openvino/image-classification/run_image_classification.py
index 5f98d95cb5..04c2984d8b 100644
--- a/examples/openvino/image-classification/run_image_classification.py
+++ b/examples/openvino/image-classification/run_image_classification.py
@@ -52,7 +52,7 @@
 from transformers.utils import check_min_version, send_example_telemetry
 from transformers.utils.versions import require_version
 
-from optimum.intel.openvino import OVConfig, OVTrainer, OVTrainingArguments
+from optimum.intel import OVConfig, OVTrainer, OVTrainingArguments
 
 
 logger = logging.getLogger(__name__)
diff --git a/examples/openvino/question-answering/requirements.txt b/examples/openvino/question-answering/requirements.txt
index 3bd58b158b..0bb12723c2 100644
--- a/examples/openvino/question-answering/requirements.txt
+++ b/examples/openvino/question-answering/requirements.txt
@@ -1,3 +1,4 @@
 datasets >= 1.8.0
 torch >= 1.9.0
 evaluate
+accelerate
diff --git a/examples/openvino/question-answering/run_qa.py b/examples/openvino/question-answering/run_qa.py
index a86c7fb6d7..261fa839c9 100644
--- a/examples/openvino/question-answering/run_qa.py
+++ b/examples/openvino/question-answering/run_qa.py
@@ -49,7 +49,7 @@
 from transformers.utils.versions import require_version
 from utils_qa import postprocess_qa_predictions
 
-from optimum.intel.openvino import OVConfig, OVTrainingArguments
+from optimum.intel import OVConfig, OVTrainingArguments
 
 
 # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
diff --git a/examples/openvino/question-answering/trainer_qa.py b/examples/openvino/question-answering/trainer_qa.py
index bda91f99b5..c10466060b 100644
--- a/examples/openvino/question-answering/trainer_qa.py
+++ b/examples/openvino/question-answering/trainer_qa.py
@@ -20,7 +20,7 @@
 import torch.nn.functional as F
 from transformers.trainer_utils import PredictionOutput
 
-from optimum.intel.openvino.trainer import OVTrainer
+from optimum.intel import OVTrainer
 
 
 class QuestionAnsweringOVTrainer(OVTrainer):
diff --git a/examples/openvino/text-classification/requirements.txt b/examples/openvino/text-classification/requirements.txt
index 95655f80ec..660e820c3c 100644
--- a/examples/openvino/text-classification/requirements.txt
+++ b/examples/openvino/text-classification/requirements.txt
@@ -4,4 +4,5 @@ scipy
 scikit-learn
 protobuf
 torch >= 1.3
-evaluate
\ No newline at end of file
+evaluate
+accelerate
diff --git a/examples/openvino/text-classification/run_glue.py b/examples/openvino/text-classification/run_glue.py
index 002f67232c..66670de77e 100644
--- a/examples/openvino/text-classification/run_glue.py
+++ b/examples/openvino/text-classification/run_glue.py
@@ -46,7 +46,7 @@
 from transformers.utils import check_min_version, send_example_telemetry
 from transformers.utils.versions import require_version
 
-from optimum.intel.openvino import OVConfig, OVTrainer, OVTrainingArguments
+from optimum.intel import OVConfig, OVTrainer, OVTrainingArguments
 
 
 # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
diff --git a/notebooks/openvino/optimum_openvino_inference.ipynb b/notebooks/openvino/optimum_openvino_inference.ipynb
index b94238d358..dcd7dc866f 100644
--- a/notebooks/openvino/optimum_openvino_inference.ipynb
+++ b/notebooks/openvino/optimum_openvino_inference.ipynb
@@ -76,7 +76,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForQuestionAnswering\n",
+    "from optimum.intel import OVModelForQuestionAnswering\n",
     "\n",
     "# Load PyTorch model from the Hub and export to OpenVINO in the background\n",
     "model = OVModelForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad\", export=True)\n",
@@ -182,7 +182,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForQuestionAnswering\n",
+    "from optimum.intel import OVModelForQuestionAnswering\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model = OVModelForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad-ov-fp32\")\n",
@@ -240,7 +240,7 @@
    ],
    "source": [
     "import torch\n",
-    "from optimum.intel.openvino import OVModelForQuestionAnswering\n",
+    "from optimum.intel import OVModelForQuestionAnswering\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model = OVModelForQuestionAnswering.from_pretrained(\"distilbert-base-uncased-distilled-squad-ov-fp32\")\n",
@@ -324,7 +324,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForQuestionAnswering\n",
+    "from optimum.intel import OVModelForQuestionAnswering\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model = OVModelForQuestionAnswering.from_pretrained(\n",
@@ -529,7 +529,7 @@
    ],
    "source": [
     "from IPython.display import Audio\n",
-    "from optimum.intel.openvino import OVModelForAudioClassification\n",
+    "from optimum.intel import OVModelForAudioClassification\n",
     "from transformers import AutoFeatureExtractor, pipeline\n",
     "from datasets import load_dataset\n",
     "\n",
@@ -638,7 +638,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForCausalLM\n",
+    "from optimum.intel import OVModelForCausalLM\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model_id = \"helenai/gpt2-ov\"\n",
@@ -704,7 +704,7 @@
    ],
    "source": [
     "from IPython.display import Image\n",
-    "from optimum.intel.openvino import OVModelForImageClassification\n",
+    "from optimum.intel import OVModelForImageClassification\n",
     "from transformers import AutoImageProcessor, pipeline\n",
     "\n",
     "model_id = \"helenai/microsoft-swin-tiny-patch4-window7-224-ov\"\n",
@@ -766,7 +766,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForMaskedLM\n",
+    "from optimum.intel import OVModelForMaskedLM\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model_id = \"helenai/bert-base-uncased-ov\"\n",
@@ -835,7 +835,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForQuestionAnswering\n",
+    "from optimum.intel import OVModelForQuestionAnswering\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "# Load the model and tokenizer saved in Part 1 of this notebook. Or use the line below to load them from the hub\n",
@@ -890,7 +890,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForSeq2SeqLM\n",
+    "from optimum.intel import OVModelForSeq2SeqLM\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model_id = \"helenai/t5-small-ov\"\n",
@@ -998,7 +998,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForSequenceClassification\n",
+    "from optimum.intel import OVModelForSequenceClassification\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model_id = \"helenai/papluca-xlm-roberta-base-language-detection-ov\"\n",
@@ -1047,7 +1047,7 @@
     }
    ],
    "source": [
-    "from optimum.intel.openvino import OVModelForTokenClassification\n",
+    "from optimum.intel import OVModelForTokenClassification\n",
     "from transformers import AutoTokenizer, pipeline\n",
     "\n",
     "model_id = \"helenai/dslim-bert-base-NER-ov-fp32\"\n",
diff --git a/notebooks/openvino/question_answering_quantization.ipynb b/notebooks/openvino/question_answering_quantization.ipynb
index 196e3ba6a7..2481c9b904 100644
--- a/notebooks/openvino/question_answering_quantization.ipynb
+++ b/notebooks/openvino/question_answering_quantization.ipynb
@@ -51,7 +51,7 @@
     "import transformers\n",
     "from evaluate import evaluator\n",
     "from openvino.runtime import Core\n",
-    "from optimum.intel.openvino import OVModelForQuestionAnswering, OVQuantizer, OVQuantizationConfig, OVConfig\n",
+    "from optimum.intel import OVModelForQuestionAnswering, OVQuantizer, OVQuantizationConfig, OVConfig\n",
     "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n",
     "\n",
     "transformers.logging.set_verbosity_error()\n",
@@ -286,7 +286,7 @@
     "**NOTE:** if you notice very low accuracy after post-training quantization, it is likely caused by an overflow issue which affects processors that do not support VNNI (Vector Neural Network Instructions). NNCF has an `overflow_fix` option to address this. It will effectively use 7 bits for quantization instead of 8 bits to prevent the overflow. To use this option, modify the code in the next cell to add an explicit quantization configuration, and set `overflow_fix` to `\"enable\"`:\n",
     "\n",
     "```\n",
-    "from optimum.intel.openvino import OVConfig, OVQuantizationConfig\n",
+    "from optimum.intel import OVConfig, OVQuantizationConfig\n",
     "\n",
     "ov_config = OVConfig(quantization_config=OVQuantizationConfig(overflow_fix=\"enable\"))\n",
     "quantizer = OVQuantizer.from_pretrained(model)\n",
diff --git a/notebooks/openvino/stable_diffusion_optimization.ipynb b/notebooks/openvino/stable_diffusion_optimization.ipynb
index 6c79bc5df0..f2297b2151 100644
--- a/notebooks/openvino/stable_diffusion_optimization.ipynb
+++ b/notebooks/openvino/stable_diffusion_optimization.ipynb
@@ -14,7 +14,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from optimum.intel.openvino import OVStableDiffusionPipeline\n",
+    "from optimum.intel import OVStableDiffusionPipeline\n",
     "from diffusers.training_utils import set_seed\n",
     "from IPython.display import display"
    ]
diff --git a/optimum/intel/openvino/modeling_seq2seq.py b/optimum/intel/openvino/modeling_seq2seq.py
index 0959a8c2f9..6d72dc7b0e 100644
--- a/optimum/intel/openvino/modeling_seq2seq.py
+++ b/optimum/intel/openvino/modeling_seq2seq.py
@@ -224,7 +224,7 @@
 
     ```python
     >>> from transformers import {processor_class}
-    >>> from optimum.intel.openvino import {model_class}
+    >>> from optimum.intel import {model_class}
     >>> from datasets import load_dataset
 
     >>> processor = {processor_class}.from_pretrained("{checkpoint}")
@@ -241,7 +241,7 @@
 
     ```python
     >>> from transformers import {processor_class}, pipeline
-    >>> from optimum.intel.openvino import {model_class}
+    >>> from optimum.intel import {model_class}
     >>> from datasets import load_dataset
 
     >>> processor = {processor_class}.from_pretrained("{checkpoint}")
diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py
index 124b0366c1..ddd0cdec75 100644
--- a/optimum/intel/openvino/quantization.py
+++ b/optimum/intel/openvino/quantization.py
@@ -235,7 +235,7 @@ def quantize(
 
         Examples:
         ```python
-        >>> from optimum.intel.openvino import OVQuantizer, OVModelForCausalLM
+        >>> from optimum.intel import OVQuantizer, OVModelForCausalLM
         >>> from transformers import AutoModelForCausalLM
         >>> model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b")
         >>> quantizer = OVQuantizer.from_pretrained(model, task="text-generation")
@@ -245,7 +245,7 @@ def quantize(
         ```
 
         ```python
-        >>> from optimum.intel.openvino import OVQuantizer, OVModelForSequenceClassification
+        >>> from optimum.intel import OVQuantizer, OVModelForSequenceClassification
         >>> from transformers import AutoModelForSequenceClassification
         >>> model = OVModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english", export=True)
         >>> # or
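
Every hunk above applies the same pattern: classes previously imported from the `optimum.intel.openvino` submodule are now imported from the top-level `optimum.intel` namespace. A minimal sketch of the new import style, combining pieces that already appear in the hunks above (the checkpoint name comes from the `quantization.py` docstring, the pipeline wiring from the notebook cells; the input sentence is illustrative):

```python
# Sketch of the consolidated import style this diff applies everywhere:
# OV* classes come from the top-level `optimum.intel` namespace.
from optimum.intel import OVModelForSequenceClassification
from transformers import AutoTokenizer, pipeline

model_id = "distilbert-base-uncased-finetuned-sst-2-english"
# export=True converts the PyTorch checkpoint to OpenVINO IR on the fly
model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(classifier("OpenVINO inference through optimum.intel works."))
```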