diff --git a/download-model.sh b/download-model.sh new file mode 100755 index 0000000..062d48f --- /dev/null +++ b/download-model.sh @@ -0,0 +1,3 @@ +#!/bin/bash +hfdownloader -m TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF:q4_k_m -s ./tests/data -t "$TOKEN" +hfdownloader -m TinyLlama/TinyLlama-1.1B-Chat-v1.0:tokenizer -s ./tests/data -t "$TOKEN" diff --git a/package.json b/package.json index 88d11c7..d6c2b4e 100644 --- a/package.json +++ b/package.json @@ -10,8 +10,8 @@ "build": "wasm-pack build -s text-yoga --dev", "build:release": "wasm-pack build -s text-yoga --release", "test:server": "npx http-server --cors -p 31300 ./tests/data", - "test:chrome": "RUST_LOG=wasm_bindgen_test_runner wasm-pack -vvv test --chrome --chromedriver \"$(which chromedriver)\" --headless", - "test:gecko": "wasm-pack test --firefox --geckodriver \"$(which geckodriver)\" --headless" + "test:chrome": "wasm-pack -vvv test --chrome --chromedriver \"$(which chromedriver)\"", + "test:firefox": "wasm-pack test --firefox --geckodriver \"$(which geckodriver)\" --headless" }, "keywords": [], "author": "", diff --git a/src/quantized_mistral.rs b/src/quantized_mistral.rs index a1f724f..c331333 100644 --- a/src/quantized_mistral.rs +++ b/src/quantized_mistral.rs @@ -31,8 +31,11 @@ impl Model { const REPEAT_LAST_N: usize = 64; let dev = Device::Cpu; + let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?; + debug!("Starting forward pass..."); let logits = self.inner.forward(&input, tokens.len())?; + debug!("Forward pass done."); let logits = logits.squeeze(0)?; let logits = if self.repeat_penalty == 1. 
|| tokens.is_empty() { logits diff --git a/tests/web.rs b/tests/web.rs index 624cab0..0f57f19 100644 --- a/tests/web.rs +++ b/tests/web.rs @@ -20,8 +20,8 @@ wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] async fn pass() -> Result<(), JsValue> { - let tokenizer_url = "http://localhost:31300/tokenizer.json"; - let model_url = "http://localhost:31300/tinymistral-248m.q4_k_m.gguf"; + let tokenizer_url = "http://localhost:31300/TinyLlama_TinyLlama-1.1B-Chat-v1.0/tokenizer.json"; + let model_url = "http://localhost:31300/TheBloke_TinyLlama-1.1B-Chat-v1.0-GGUF/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"; let tokenizer_blob: Vec<u8> = utils::load_binary(&tokenizer_url).await?; let tokenizer_blob_len = format!("{}", &tokenizer_blob.len()); @@ -32,9 +32,16 @@ async fn pass() -> Result<(), JsValue> { log!("model blob size", &model_blob_len); log!("loading model..."); + let mut model = Model::new(model_blob, tokenizer_blob)?; log!("model loaded."); - let prompt: String = String::from("What is a good recipe for onion soup"); + let prompt: String = String::from( + "<|system|>
 You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. 
 <|user|> 
 What is borrow checking in rust? 
 <|assistant|>", + ); let temp: f64 = 0.8; let top_p: f64 = 1.; let repeat_penalty: f32 = 1.1;