
Commit 4a4ea06

Merge remote-tracking branch 'upstream/master' into extend-wwb-test-models

ilya-lavrenov committed Jan 9, 2025
2 parents 6f9ae1b + 7ef754c
Showing 7 changed files with 15 additions and 13 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/causal_lm_cpp.yml
```diff
@@ -53,17 +53,17 @@ jobs:
           wget https://huggingface.co/smangrul/tinyllama_lora_sql/resolve/main/adapter_model.safetensors?download=true -O adapter_model.safetensors
       - run: >
           . ./ov/setupvars.sh
-          && timeout 25s ./build/samples/cpp/multinomial_causal_lm/multinomial_causal_lm ./open_llama_3b_v2/ a
+          && timeout 35s ./build/samples/cpp/multinomial_causal_lm/multinomial_causal_lm ./open_llama_3b_v2/ a
         env:
           PYTHONPATH: "./build"
       - run: >
           . ./ov/setupvars.sh
-          && timeout 25s ./samples/python/multinomial_causal_lm/multinomial_causal_lm.py ./open_llama_3b_v2/ b
+          && timeout 35s ./samples/python/multinomial_causal_lm/multinomial_causal_lm.py ./open_llama_3b_v2/ b
         env:
           PYTHONPATH: "./build"
       - run: >
           . ./ov/setupvars.sh
-          && timeout 25s ./build/samples/cpp/text_generation/greedy_causal_lm ./open_llama_3b_v2/ "return 0"
+          && timeout 35s ./build/samples/cpp/text_generation/greedy_causal_lm ./open_llama_3b_v2/ "return 0"
           | diff <(timeout 25s samples/python/text_generation/greedy_causal_lm.py ./open_llama_3b_v2/ "return 0") -
         env:
           PYTHONPATH: "./build"
```
4 changes: 3 additions & 1 deletion .github/workflows/genai-tools.yml
```diff
@@ -44,7 +44,7 @@ jobs:
     with:
       platform: ubuntu22
       commit_packages_to_provide: wheels
-      revision: 345163f87953fb0dd8dd590257eb7fc84378da8e
+      revision: latest_available_commit

   llm_bench:
     name: 'LLM bench tests'
@@ -128,6 +128,7 @@ jobs:
           optimum-cli export openvino --trust-remote-code --model openai/whisper-tiny ./ov_models/whisper-tiny
           python ./tools/llm_bench/benchmark.py -m ./ov_models/whisper-tiny --media multilingual_librispeech/data/mls_polish/train/audio/3283_1447_000/3283_1447_000000.flac -d cpu -n 1 --optimum
       - name: Test openai/whisper-tiny via GenAI
+        run: |
           python ./tools/llm_bench/benchmark.py -m ./ov_models/whisper-tiny --media multilingual_librispeech/data/mls_polish/train/audio/3283_1447_000/3283_1447_000000.flac -d cpu -n 1
           rm -rf ./ov_models/whisper-tiny
           rm -rf multilingual_librispeech
@@ -136,6 +137,7 @@ jobs:
           optimum-cli export openvino --model katuni4ka/tiny-random-llava-next ./ov_models/tiny-random-llava-next --task image-text-to-text --trust-remote-code
           python ./tools/llm_bench/benchmark.py -m ./ov_models/tiny-random-llava-next --media https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11 --prompt "What is unusual on this image?" -ic 20 --optimum
       - name: Test katuni4ka/tiny-random-llava-next via GenAI
+        run: |
           python ./tools/llm_bench/benchmark.py -m ./ov_models/tiny-random-llava-next --media https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11 --prompt "What is unusual on this image?" -ic 20
           rm -rf ./ov_models
```
2 changes: 1 addition & 1 deletion .github/workflows/linux.yml
```diff
@@ -52,7 +52,7 @@ jobs:
     with:
       platform: ubuntu22
       commit_packages_to_provide: wheels
-      revision: 345163f87953fb0dd8dd590257eb7fc84378da8e
+      revision: latest_available_commit

   - name: Clone docker tag from OpenVINO repo
     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```
2 changes: 1 addition & 1 deletion .github/workflows/mac.yml
```diff
@@ -17,7 +17,7 @@ concurrency:

 env:
   PYTHON_VERSION: '3.10'
-  OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e
+  OV_BRANCH: 'master'
   OV_TARBALL: ''

 jobs:
```
4 changes: 2 additions & 2 deletions .github/workflows/stable_diffusion_1_5_cpp.yml
```diff
@@ -45,7 +45,7 @@ jobs:
     with:
       platform: ubuntu22
       commit_packages_to_provide: wheels
-      revision: 345163f87953fb0dd8dd590257eb7fc84378da8e
+      revision: latest_available_commit

   openvino_download_windows:
     name: Download OpenVINO for Windows
@@ -71,7 +71,7 @@
     with:
       platform: windows
       commit_packages_to_provide: wheels
-      revision: 345163f87953fb0dd8dd590257eb7fc84378da8e
+      revision: latest_available_commit

   stable_diffusion_1_5_cpp-linux:
     runs-on: ubuntu-22.04-8-cores
```
2 changes: 1 addition & 1 deletion .github/workflows/windows.yml
```diff
@@ -17,7 +17,7 @@ concurrency:

 env:
   PYTHON_VERSION: '3.11'
-  OV_BRANCH: 345163f87953fb0dd8dd590257eb7fc84378da8e
+  OV_BRANCH: 'master'
   OV_TARBALL: ''

 jobs:
```
8 changes: 4 additions & 4 deletions README.md
````diff
@@ -73,9 +73,9 @@ optimum-cli export openvino --model "TinyLlama/TinyLlama-1.1B-Chat-v1.0" --weigh
 ### Run generation using LLMPipeline API in Python

 ```python
-import openvino_genai as ov_genai
+import openvino_genai
 #Will run model on CPU, GPU or NPU are possible options
-pipe = ov_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0/", "CPU")
+pipe = openvino_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0/", "CPU")
 print(pipe.generate("The Sun is yellow because", max_new_tokens=100))
 ```
````
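For reference, the snippet touched by this hunk can also stream tokens as they are generated. Below is a minimal sketch, assuming the model was exported to `./TinyLlama-1.1B-Chat-v1.0/` as in the README and using a streamer callback in the style of the openvino_genai samples:

```python
import openvino_genai

# Assumes the model dir produced by the optimum-cli export step above.
pipe = openvino_genai.LLMPipeline("./TinyLlama-1.1B-Chat-v1.0/", "CPU")

def streamer(subword):
    # Print each subword as soon as it is generated.
    print(subword, end="", flush=True)
    # The return flag signals whether generation should stop; False means continue.
    return False

pipe.generate("The Sun is yellow because", max_new_tokens=100, streamer=streamer)
```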

````diff
@@ -128,11 +128,11 @@ curl -O "https://storage.openvinotoolkit.org/test_data/images/dog.jpg"
 ```python
 import numpy as np
 import openvino as ov
-import openvino_genai as ov_genai
+import openvino_genai
 from PIL import Image

 # Choose GPU instead of CPU in the line below to run the model on Intel integrated or discrete GPU
-pipe = ov_genai.VLMPipeline("./InternVL2-1B", "CPU")
+pipe = openvino_genai.VLMPipeline("./InternVL2-1B", "CPU")
 pipe.start_chat()

 image = Image.open("dog.jpg")
````
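The hunk is cut off here by the collapsed diff view. For context, a sketch of how such a snippet typically continues, modeled on the openvino_genai VLM samples; the image conversion and `generate` parameters are assumptions, not part of this diff:

```python
import numpy as np
import openvino as ov
import openvino_genai
from PIL import Image

pipe = openvino_genai.VLMPipeline("./InternVL2-1B", "CPU")
pipe.start_chat()

image = Image.open("dog.jpg")
# Pack the pixels into a (1, H, W, 3) uint8 batch, as the VLM samples do.
image_data = np.array(image.getdata()).reshape(1, image.size[1], image.size[0], 3).astype(np.uint8)
image_tensor = ov.Tensor(image_data)

print(pipe.generate("Can you describe the image?", image=image_tensor, max_new_tokens=100))
pipe.finish_chat()
```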
