diff --git a/projects/llavaguard/index.html b/projects/llavaguard/index.html
index 741ff73..0ac9643 100644
--- a/projects/llavaguard/index.html
+++ b/projects/llavaguard/index.html
@@ -367,6 +367,53 @@

Poster

-->

## Usage

Here we provide a code snippet for LlavaGuard-7B using SGLang. A Hugging Face Transformers implementation will follow soon, and we welcome contributions toward it. A suitable Docker image can be found in our GitHub repo.
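
If you prefer the containerized route, a minimal sketch is shown below. The image name `llavaguard-sglang` is only a placeholder (the actual image is documented in our GitHub repo), and the container is assumed to simply wrap the same SGLang server command used in step 1.

            # Placeholder image name; replace it with the image from our GitHub repo.
            docker run --gpus all -p 10000:10000 llavaguard-sglang \
                python3 -m sglang.launch_server --model-path AIML-TUDA/LlavaGuard-7B \
                --tokenizer-path llava-hf/llava-1.5-7b-hf --port 10000 --host 0.0.0.0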

0. Install requirements

            pip install "sglang[all]"

1. Select a model and start an SGLang server

            CUDA_VISIBLE_DEVICES=0 python3 -m sglang.launch_server --model-path AIML-TUDA/LlavaGuard-7B --tokenizer-path llava-hf/llava-1.5-7b-hf --port 10000

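Other LlavaGuard checkpoints can be served the same way by swapping the model and tokenizer paths. As a sketch, assuming the 13B checkpoint is published as AIML-TUDA/LlavaGuard-13B alongside the matching llava-1.5-13b-hf tokenizer:

            CUDA_VISIBLE_DEVICES=0 python3 -m sglang.launch_server --model-path AIML-TUDA/LlavaGuard-13B --tokenizer-path llava-hf/llava-1.5-13b-hf --port 10000
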
2. Model Inference

            import sglang as sgl
            from sglang import RuntimeEndpoint

            @sgl.function
            def guard_gen(s, image_path, prompt):
                # Build the multimodal user turn: the image followed by the safety taxonomy prompt
                s += sgl.user(sgl.image(image_path) + prompt)
                hyperparameters = {
                    'temperature': 0.2,
                    'top_p': 0.95,
                    'top_k': 50,
                    'max_tokens': 500,
                }
                # Generate the model's safety assessment and store it under "json_output"
                s += sgl.assistant(sgl.gen("json_output", **hyperparameters))

            im_path = 'path/to/your/image'
            prompt = safety_taxonomy_below  # the LlavaGuard safety taxonomy / policy prompt
            backend = RuntimeEndpoint("http://localhost:10000")
            sgl.set_default_backend(backend)
            out = guard_gen.run(image_path=im_path, prompt=prompt)
            print(out['json_output'])
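
The model's reply is a JSON-formatted safety assessment returned as a string. A minimal sketch for consuming it programmatically, assuming the rating/category/rationale fields of the LlavaGuard assessment schema, could look like this:

            import json

            # Parse the generated string into a dict; this raises a ValueError if the
            # model did not return valid JSON, so consider guarding it in production.
            assessment = json.loads(out['json_output'])
            print(assessment.get('rating'), assessment.get('category'))

For scoring many images at once, SGLang also provides a `run_batch` method on decorated functions (one keyword-argument dict per image), which avoids issuing the requests one by one.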

## Overview