Skip to content

Commit

Permalink
Update references.bib
Browse files Browse the repository at this point in the history
  • Loading branch information
kristiankersting committed Nov 14, 2024
1 parent 4de0378 commit e2670e4
Showing 1 changed file with 10 additions and 6 deletions.
16 changes: 10 additions & 6 deletions references.bib
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
@misc{haerle2024scarsparseconditionedautoencoders,
  anote         = {./images/haerle2024scar.png},
  title         = {{SCAR}: Sparse Conditioned Autoencoders for Concept Detection and Steering in {LLMs}},
  author        = {Härle, Ruben and Friedrich, Felix and Brack, Manuel and Deiseroth, Björn and Schramowski, Patrick and Kersting, Kristian},
  year          = {2024},
  eprint        = {2411.07122},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CL},
  url           = {https://arxiv.org/pdf/2411.07122},
  keywords      = {Large Language Models, Concept Steering, Sparse Autoencoder, AI Safety, SAEs, Mechanistic Interpretability},
  note          = {Large Language Models (LLMs) have demonstrated remarkable capabilities in generating human-like text, but their output may not be aligned with the user or even produce harmful content. This paper presents a novel approach to detect and steer concepts such as toxicity before generation. We introduce the Sparse Conditioned Autoencoder (SCAR), a single trained module that extends the otherwise untouched LLM. SCAR ensures full steerability, towards and away from concepts (e.g., toxic content), without compromising the quality of the model's text generation on standard evaluation benchmarks. We demonstrate the effective application of our approach through a variety of concepts, including toxicity, safety, and writing style alignment. As such, this work establishes a robust framework for controlling LLM generations, ensuring their ethical and safe deployment in real-world applications.}
}


@incollection{wuest2024bongard,
Expand Down

0 comments on commit e2670e4

Please sign in to comment.