
Commit 45a7950

prepare release v16.5.0 (#163)

* prepare release v16.5.0
* fix typo, change cuda to CUDA in comments

1 parent 080ab8a commit 45a7950

File tree

2 files changed: +69 −29 lines


weaviate/Chart.yaml

Lines changed: 2 additions & 2 deletions

@@ -7,10 +7,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 16.4.0
+version: 16.5.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 1.20.0
+appVersion: 1.21.0
 icon: https://raw.githubusercontent.com/weaviate/weaviate/19de0956c69b66c5552447e84d016f4fe29d12c9/docs/assets/weaviate-logo.png
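
For context, a minimal sketch of consuming this release as a chart dependency; the repository URL is the published weaviate-helm repo and is an assumption, not part of this diff:

  # Chart.yaml of a consuming (parent) chart -- illustrative only
  dependencies:
    - name: weaviate
      version: 16.5.0                                       # chart version prepared by this commit
      repository: https://weaviate.github.io/weaviate-helm  # assumed repo URL, not in this diff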

weaviate/values.yaml

Lines changed: 67 additions & 27 deletions
@@ -7,7 +7,7 @@ image:
   # of weaviate. In accordance with Infra-as-code, you should pin this value
   # down and only change it if you explicitly want to upgrade the Weaviate
   # version.
-  tag: 1.20.0
+  tag: 1.21.0
   repo: semitechnologies/weaviate
   # Image pull policy: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
   pullPolicy: IfNotPresent
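
Per the comment above, the image tag should be pinned rather than left to drift; a minimal values override (passed to helm with -f, file name hypothetical) might look like:

  # my-values.yaml -- pin the Weaviate image explicitly
  image:
    tag: 1.21.0   # upgrade only as a deliberate, reviewed change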
@@ -367,7 +367,8 @@ modules:
   # but in some situations it cannot reach the same level of accuracy as
   # transformers-based models.
   text2vec-contextionary:
-    # disable if you want to use transformers or import or own vectors
+
+    # Enable deployment of this module
     enabled: false

     # The configuration below is ignored if enabled==false
@@ -416,12 +417,13 @@ modules:
   # CUDA-enabled GPUs for optimal performance.
   text2vec-transformers:

-    # enable if you want to use transformers instead of the
-    # text2vec-contextionary module
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
     # The configuration below is ignored if enabled==false

     # replace with model of choice, see
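
The comments describe two deployment modes: either the release deploys the inference container, or enabled stays false and inferenceUrl points at an externally hosted service. A hedged sketch of the external mode; the URL is a placeholder, and the exact value shape is an assumption since the chart default is just an empty map:

  modules:
    text2vec-transformers:
      enabled: false                              # do not deploy the container with this release
      inferenceUrl: http://t2v.example.svc:8080   # hypothetical external endpoint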
@@ -448,11 +450,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
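
When enable_cuda is turned on, the comment above notes that GPU requests and limits must be set explicitly; a sketch under the assumption of a single-GPU node pool:

  modules:
    text2vec-transformers:
      enabled: true
      enable_cuda: true
      resources:
        requests:
          nvidia.com/gpu: 1   # assumed sizing; match your cluster's GPU capacity
        limits:
          nvidia.com/gpu: 1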
@@ -496,6 +498,7 @@ modules:
       # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
       inferenceUrl: {}

+      # The configuration below is ignored if enabled==false
       tag: facebook-dpr-ctx_encoder-single-nq-base
       repo: semitechnologies/transformers-inference
       registry: docker.io
@@ -514,11 +517,11 @@ modules:
       # accordingly and you need to explicitly set GPU requests & limits below
       enable_cuda: false

-      # only used when cuda is enabled
+      # only used when CUDA is enabled
       nvidia_visible_devices: all
       nvidia_driver_capabilities: compute,utility

-      # only used when cuda is enabled
+      # only used when CUDA is enabled
       ld_library_path: /usr/local/nvidia/lib64

       resources:
@@ -550,6 +553,7 @@ modules:
       # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
       inferenceUrl: {}

+      # The configuration below is ignored if enabled==false
       tag: facebook-dpr-question_encoder-single-nq-base
       repo: semitechnologies/transformers-inference
       registry: docker.io
@@ -568,11 +572,11 @@ modules:
       # accordingly and you need to explicitly set GPU requests & limits below
       enable_cuda: false

-      # only used when cuda is enabled
+      # only used when CUDA is enabled
       nvidia_visible_devices: all
       nvidia_driver_capabilities: compute,utility

-      # only used when cuda is enabled
+      # only used when CUDA is enabled
       ld_library_path: /usr/local/nvidia/lib64

       resources:
@@ -606,10 +610,15 @@ modules:
   # If you want to run a different model that published ones you can follow the
   # tutorial from here on how to create such a container: https://github.com/weaviate/t2v-gpt4all-models
   text2vec-gpt4all:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: all-MiniLM-L6-v2
     repo: semitechnologies/gpt4all-inference
     registry: docker.io
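
Conversely, deploying the module with the release only requires flipping the flag; everything below it comes from the defaults shown in the hunk above:

  modules:
    text2vec-gpt4all:
      enabled: true           # deploy the gpt4all inference container
      tag: all-MiniLM-L6-v2   # default model image tag from this chart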
@@ -721,9 +730,9 @@ modules:
   # run with CUDA-enabled GPUs for optimal performance.
   multi2vec-clip:

-    # enable if you want to use transformers instead of the
-    # text2vec-contextionary module
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
@@ -752,11 +761,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -794,7 +803,9 @@ modules:
   # https://github.com/weaviate/multi2vec-bind-inference
   multi2vec-bind:

+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
@@ -819,11 +830,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -855,12 +866,17 @@ modules:
     tolerations:

   # The qna-transformers module uses neural networks, such as BERT,
-  # DistilBERT, to find an aswer in text to a given question
+  # DistilBERT, to find an answer in text to a given question
   qna-transformers:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: bert-large-uncased-whole-word-masking-finetuned-squad-34d66b1
     repo: semitechnologies/qna-transformers
     registry: docker.io
@@ -880,11 +896,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -977,10 +993,14 @@ modules:
   # The img2vec-neural module uses neural networks, to generate
   # a vector representation of the image
   img2vec-neural:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
     tag: resnet50
     repo: semitechnologies/img2vec-pytorch
     registry: docker.io
@@ -1000,11 +1020,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -1058,10 +1078,15 @@ modules:
   # More information about Cross-Encoders can be found here:
   # https://www.sbert.net/examples/applications/cross-encoder/README.html
   reranker-transformers:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: cross-encoder-ms-marco-MiniLM-L-6-v2
     repo: semitechnologies/reranker-transformers
     registry: docker.io
@@ -1081,11 +1106,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -1124,10 +1149,15 @@ modules:
   # The text-spellcheck module uses spellchecker library to check
   # misspellings in a given text
   text-spellcheck:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: pyspellchecker-en
     repo: semitechnologies/text-spellcheck-model
     registry: docker.io
@@ -1173,10 +1203,15 @@ modules:
   # The ner-transformers module uses spellchecker library to check
   # misspellings in a given text
   ner-transformers:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: dbmdz-bert-large-cased-finetuned-conll03-english-0.0.2
     repo: semitechnologies/ner-transformers
     registry: docker.io
@@ -1196,11 +1231,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
@@ -1238,10 +1273,15 @@ modules:

   # The sum-transformers module makes result texts summarizations
   sum-transformers:
+
+    # Enable deployment of this module
     enabled: false
+
     # You can set directly an inference URL of this module without deploying it with this release.
     # You can do so by setting a value for the `inferenceUrl` here AND by setting the `enable` to `false`
     inferenceUrl: {}
+
+    # The configuration below is ignored if enabled==false
     tag: facebook-bart-large-cnn-1.0.0
     repo: semitechnologies/sum-transformers
     registry: docker.io
@@ -1261,11 +1301,11 @@ modules:
     # accordingly and you need to explicitly set GPU requests & limits below
     enable_cuda: false

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     nvidia_visible_devices: all
     nvidia_driver_capabilities: compute,utility

-    # only used when cuda is enabled
+    # only used when CUDA is enabled
     ld_library_path: /usr/local/nvidia/lib64

     resources:
