Add configs from 143530
KodiaqQ committed Jul 15, 2024
1 parent 9ef6766 · commit 132cd8f
Showing 1 changed file with 20 additions and 5 deletions.
optimum/intel/openvino/configuration.py (25 changes: 20 additions & 5 deletions)
@@ -33,10 +33,9 @@
logger = logging.getLogger(__name__)

_DEFAULT_4BIT_CONFIGS = {
"databricks/dolly-v2-3b": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.8},
"databricks/dolly-v2-3b": {"bits": 4, "sym": False, "group_size": 128, "scale_estimation": True},
"EleutherAI/gpt-j-6b": {"bits": 4, "sym": False, "group_size": 64},
"facebook/opt-6.7b": {"bits": 4, "sym": False, "group_size": 64, "ratio": 0.8},
"bigscience/bloomz-7b1": {"bits": 4, "sym": False, "group_size": 32, "ratio": 0.6},
"togethercomputer/RedPajama-INCITE-7B-Instruct": {"bits": 4, "sym": False, "group_size": 128},
"HuggingFaceH4/zephyr-7b-beta": {
"bits": 4,
@@ -69,7 +68,7 @@
"pansophic/rocket-3B": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.8},
"THUDM/chatglm2-6b": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.72},
"Qwen/Qwen-7B-Chat": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.6},
"openlm-research/open_llama_3b": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True},
"openlm-research/open_llama_3b": {"bits": 4, "sym": False, "group_size": 64, "all_layers": True},
"openlm-research/open_llama_3b_v2": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True},
"tiiuae/falcon-7b-instruct": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True},
"psmathur/orca_mini_3b": {
@@ -90,7 +89,12 @@
},
"mistralai/Mixtral-8x7B-v0.1": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.8},
"facebook/opt-2.7b": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.7},
"togethercomputer/RedPajama-INCITE-Chat-3B-v1": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.8},
"togethercomputer/RedPajama-INCITE-Chat-3B-v1": {
"bits": 4,
"sym": False,
"group_size": 128,
"scale_estimation": True,
},
"lmsys/vicuna-7b-v1.5": {"bits": 4, "sym": False, "group_size": 128, "ratio": 1.0},
"stabilityai/stablelm-tuned-alpha-3b": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.8},
"mistralai/Mistral-7B-v0.1": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.9},
@@ -100,7 +104,18 @@
"group_size": 128,
"ratio": 0.8,
"dataset": "wikitext2",
"awq": True,
"quant_method": "awq",
},
"openai-community/gpt2": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.5, "scale_estimation": True},
"lmsys/longchat-7b-16k": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.9},
"bigcode/starcoder2-3b": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.9},
"TinyLlama/TinyLlama-1.1B-Chat-v1.0": {"bits": 4, "sym": False, "group_size": 128, "ratio": 0.8},
"stabilityai/stablelm-tuned-alpha-7b": {
"bits": 4,
"sym": False,
"group_size": 128,
"ratio": 0.6,
"scale_estimation": True,
},
}

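For context, each entry in _DEFAULT_4BIT_CONFIGS above is a per-model default for 4-bit weight-only quantization in optimum-intel, looked up by model id when no explicit parameters are supplied. Below is a minimal sketch of how one such default would look when passed explicitly; it is not part of this commit and assumes the OVWeightQuantizationConfig API available in optimum-intel around this release.

from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# Explicit equivalent of the new "databricks/dolly-v2-3b" entry:
# 4-bit asymmetric weight quantization, group size 128, with scale estimation.
quantization_config = OVWeightQuantizationConfig(
    bits=4,
    sym=False,
    group_size=128,
    scale_estimation=True,
)

# Export the model to OpenVINO IR and compress its weights with the config above.
model = OVModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-3b",
    export=True,
    quantization_config=quantization_config,
)

Entries such as "quant_method": "awq" or "dataset": "wikitext2" map onto the corresponding OVWeightQuantizationConfig arguments in the same way.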
