diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..34aa06b
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,49 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+# rust-clippy is a tool that runs a bunch of lints to catch common
+# mistakes in your Rust code and help improve your Rust code.
+# More details at https://github.com/rust-lang/rust-clippy
+# and https://rust-lang.github.io/rust-clippy/
+
+name: CI
+
+on:
+  pull_request:
+  push:
+    paths-ignore:
+      - '**.md'
+      - 'LICENSE'
+
+jobs:
+  rust-clippy-analyze:
+    name: Run rust-clippy analysis
+    runs-on: ubuntu-latest
+    permissions:
+      security-events: write
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Check format
+        run: cargo fmt --check
+
+      - name: Run tests
+        run: cargo test
+
+      - name: Install required cargo tools
+        run: cargo install clippy-sarif sarif-fmt
+
+      - name: Run rust-clippy
+        run:
+          cargo clippy
+          --all-features
+          --message-format=json | clippy-sarif | tee rust-clippy-results.sarif | sarif-fmt
+        continue-on-error: true
+
+      - name: Upload analysis results to GitHub
+        uses: github/codeql-action/upload-sarif@v3
+        with:
+          sarif_file: rust-clippy-results.sarif
+          wait-for-processing: true
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4fffb2f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/target
+/Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..5408aef
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "tokeneer"
+version = "0.0.0"
+edition = "2021"
+authors = ["YdrMaster"]
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+regex = "1.10"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..da5fd36
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,9 @@
+The MIT License (MIT)
+
+Copyright © 2024 YdrMaster
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1cb1561
--- /dev/null
+++ b/README.md
@@ -0,0 +1,11 @@
+# Tokeneer
+
+[![CI](https://github.com/YdrMaster/tokeneer/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/YdrMaster/tokeneer/actions)
+[![license](https://img.shields.io/github/license/YdrMaster/tokeneer)](https://mit-license.org/)
+![GitHub repo size](https://img.shields.io/github/repo-size/YdrMaster/tokeneer)
+![GitHub code size in bytes](https://img.shields.io/github/languages/code-size/YdrMaster/tokeneer)
+
+[![GitHub Issues](https://img.shields.io/github/issues/YdrMaster/tokeneer)](https://github.com/YdrMaster/tokeneer/issues)
+[![GitHub Pull Requests](https://img.shields.io/github/issues-pr/YdrMaster/tokeneer)](https://github.com/YdrMaster/tokeneer/pulls)
+![GitHub contributors](https://img.shields.io/github/contributors/YdrMaster/tokeneer)
+![GitHub commit activity](https://img.shields.io/github/commit-activity/m/YdrMaster/tokeneer)
diff --git a/src/bpe/algorithm.rs b/src/bpe/algorithm.rs
new file mode 100644
index 0000000..5094a59
--- /dev/null
+++ b/src/bpe/algorithm.rs
@@ -0,0 +1,293 @@
+use super::{utok, Bpe};
+use std::{
+    cmp::Ordering::{self, Equal},
+    collections::BinaryHeap,
+    fmt,
+    iter::zip,
+    ops::Range,
+};
+
+pub struct BpeTokenizer<'v, 't> {
+    text: &'t [u8],
+    bpe: &'v Bpe,
+    marks: Vec<Mark>,
+    merges: BinaryHeap<Merge>,
+}
+
+pub struct IntoIter<'v> {
+    bpe: &'v Bpe,
+    marks: Vec<Mark>,
+    i: usize,
+}
+
+pub struct Iter<'a> {
+    bpe: &'a Bpe,
+    marks: &'a [Mark],
+}
+
+impl Bpe {
+    pub fn build_tokenizer<'v, 't>(&'v self, text: &'t str) -> BpeTokenizer<'v, 't> {
+        let mut marks = vec![Mark::unk(self.unk); text.len()];
+        let mut merges = BinaryHeap::new();
+
+        let mut buf = [0u8; 4];
+        let mut last = None;
+        for (i, c) in text.char_indices() {
+            let c = c.encode_utf8(&mut buf).as_bytes();
+            last = if let Some(token) = self.find_piece(c) {
+                marks[i].token = token;
+                if let Some(pos) = last.take() {
+                    marks[i].back_distance = (i - pos) as _;
+                    if let Some(merge) = self.build_merge(
+                        text.as_bytes(),
+                        pos..i + c.len(),
+                        (marks[pos].token, token),
+                    ) {
+                        merges.push(merge);
+                    }
+                }
+                Some(i)
+            } else {
+                for (&b, mark) in zip(c, &mut marks[i..]) {
+                    mark.token = self.bytes[b as usize];
+                }
+                None
+            };
+        }
+
+        BpeTokenizer {
+            text: text.as_bytes(),
+            bpe: self,
+            marks,
+            merges,
+        }
+    }
+
+    fn build_merge(&self, text: &[u8], range: Range<usize>, pair: (utok, utok)) -> Option<Merge> {
+        self.find_piece(&text[range.clone()]).map(|merged| Merge {
+            pos: range.start,
+            pair,
+            merge: merged,
+            rank: self.token(merged).rank,
+        })
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Mark {
+    token: utok,
+    back_distance: u32,
+}
+
+impl Mark {
+    #[inline(always)]
+    const fn unk(unk: utok) -> Self {
+        Self {
+            token: unk,
+            back_distance: 0,
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+struct Merge {
+    pos: usize,
+    pair: (utok, utok),
+    merge: utok,
+    rank: u32,
+}
+impl Ord for Merge {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // Comparison order: rank -> merged token -> position -> pair
+        match self.rank.cmp(&other.rank) {
+            Equal => match self.merge.cmp(&other.merge) {
+                Equal => match self.pos.cmp(&other.pos) {
+                    Equal => self.pair.cmp(&other.pair),
+                    other => other,
+                },
+                other => other,
+            },
+            other => other,
+        }
+        .reverse()
+    }
+}
+impl PartialOrd for Merge {
+    #[inline]
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl BpeTokenizer<'_, '_> {
+    /// Attempts one merge; returns whether a merge was actually performed.
+    pub fn merge(&mut self) -> bool {
+        // A single merge involves at most 4 tokens:
+        //
+        //   t0 t1 t2 t3
+        //   -- -- -- --
+        //        ↓
+        //   t0 merge t3
+        //   -- ----- --
+        //
+        // A successful merge consumes at least 1 item from the merge queue
+        // and pushes at most 2 new items onto it:
+        //
+        //   t0 merge t3
+        //   --------
+        //      --------
+
+        // Consume from the merge queue
+        while let Some(Merge {
+            pos: p1,
+            pair: (t1, t2),
+            merge,
+            ..
+        }) = self.merges.pop()
+        {
+            // Check that the queued merge is still valid
+            if self.marks[p1].token != t1 {
+                continue;
+            }
+            let l1 = self.bpe.token(t1).len();
+            let p2 = p1 + l1;
+            if self.marks[p2].token != t2 {
+                continue;
+            }
+            // Merge
+            self.marks[p1].token = merge;
+            self.marks[p2].token = self.bpe.unk;
+
+            let l2 = self.bpe.token(t2).len();
+            let p3 = p2 + l2;
+            // Queue a merge + t3 candidate
+            match self.marks.get_mut(p3) {
+                None => {}
+                Some(Mark {
+                    token,
+                    back_distance,
+                }) => {
+                    *back_distance = (l1 + l2) as _;
+
+                    let t3 = *token;
+                    let l3 = self.bpe.token(t3).len();
+                    let p4 = p3 + l3;
+                    if let Some(merge) = self.bpe.build_merge(self.text, p1..p4, (merge, t3)) {
+                        self.merges.push(merge);
+                    }
+                }
+            }
+            // Queue a t0 + merge candidate
+            match self.marks[p1].back_distance as usize {
+                0 => {}
+                l0 => {
+                    let p0 = p1 - l0;
+                    let t0 = self.marks[p0].token;
+                    if let Some(merge) = self.bpe.build_merge(self.text, p0..p3, (t0, merge)) {
+                        self.merges.push(merge);
+                    }
+                }
+            }
+            // A merge was performed
+            return true;
+        }
+        false
+    }
+
+    #[inline]
+    pub fn iter(&self) -> Iter {
+        Iter {
+            bpe: self.bpe,
+            marks: &self.marks,
+        }
+    }
+}
+
+impl<'v> IntoIterator for BpeTokenizer<'v, '_> {
+    type Item = utok;
+    type IntoIter = IntoIter<'v>;
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        Self::IntoIter {
+            bpe: self.bpe,
+            marks: self.marks,
+            i: 0,
+        }
+    }
+}
+
+impl Iterator for IntoIter<'_> {
+    type Item = utok;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match &self.marks[self.i..] {
+            &[Mark { token, .. }, ..] => {
+                self.i += self.bpe.token(token).len();
+                Some(token)
+            }
+            [] => None,
+        }
+    }
+}
+
+impl Iterator for Iter<'_> {
+    type Item = utok;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.marks {
+            &[Mark { token, .. }, ref tail @ ..] => {
+                self.marks = &tail[self.bpe.token(token).len() - 1..];
+                Some(token)
+            }
+            [] => None,
+        }
+    }
+}
+
+impl fmt::Display for BpeTokenizer<'_, '_> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        use std::str::{from_utf8, from_utf8_unchecked};
+
+        writeln!(f, "---------------------------")?;
+        {
+            writeln!(f, "text:")?;
+            writeln!(f, " {}", unsafe { from_utf8_unchecked(self.text) })?;
+        }
+        writeln!(f, "---------------------------")?;
+        {
+            writeln!(f, "tokens:")?;
+            write!(f, " ")?;
+            for token in self.iter() {
+                let text = unsafe { from_utf8_unchecked(self.bpe.token(token)) };
+                write!(f, "{text}")?;
+            }
+            writeln!(f)?;
+        }
+        writeln!(f, "---------------------------")?;
+        {
+            writeln!(f, "tokens:")?;
+            for token in self.iter() {
+                write!(f, " {token:>6}: ")?;
+                match from_utf8(self.bpe.token(token)) {
+                    Ok(s) => writeln!(f, "{s}")?,
+                    Err(_) => writeln!(f, "{token:?}")?,
+                }
+            }
+        }
+        writeln!(f, "---------------------------")?;
+        {
+            writeln!(f, "merges:")?;
+            let mut merges = self.merges.clone();
+            while let Some(Merge {
+                rank,
+                merge: merged,
+                ..
+            }) = merges.pop()
+            {
+                let text = unsafe { from_utf8_unchecked(self.bpe.token(merged)) };
+                writeln!(f, " {rank:>6} | {text}")?;
+            }
+        }
+        writeln!(f, "---------------------------")
+    }
+}
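The `.reverse()` at the end of `Merge::cmp` is what makes the merge queue work: `std::collections::BinaryHeap` is a max-heap, so reversing the comparison makes it pop the candidate with the lowest `rank` (the highest-priority BPE merge) first. Below is a minimal, self-contained sketch of the same pattern; the `Candidate` type and its values are illustrative and not part of this crate:

    use std::{cmp::Ordering, collections::BinaryHeap};

    /// Stand-in for `Merge`: the lowest `rank` should be popped first.
    #[derive(PartialEq, Eq)]
    struct Candidate {
        rank: u32,
    }

    impl Ord for Candidate {
        fn cmp(&self, other: &Self) -> Ordering {
            // Reversed so the max-heap behaves like a min-heap on `rank`.
            self.rank.cmp(&other.rank).reverse()
        }
    }

    impl PartialOrd for Candidate {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    fn main() {
        let mut heap = BinaryHeap::new();
        heap.extend([Candidate { rank: 7 }, Candidate { rank: 0 }, Candidate { rank: 3 }]);
        assert_eq!(heap.pop().map(|c| c.rank), Some(0)); // lowest rank comes out first
    }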
diff --git a/src/bpe/mod.rs b/src/bpe/mod.rs
new file mode 100644
index 0000000..be29392
--- /dev/null
+++ b/src/bpe/mod.rs
@@ -0,0 +1,254 @@
+mod algorithm;
+
+use crate::{as_byte_token, utok, Method};
+use std::{
+    collections::{HashMap, HashSet},
+    iter::zip,
+    ops::Deref,
+    pin::Pin,
+    ptr::NonNull,
+};
+
+pub struct Bpe {
+    /// The text of every token; stored as raw `u8` so no alignment is needed and space is saved.
+    _vocab: Pin<Box<[u8]>>,
+    /// Token metadata, in token-id order.
+    tokens: Box<[TokenMeta]>,
+    /// Token indices sorted by the lexicographic order of their text, for binary-searching a token from a piece.
+    /// Single-byte tokens that can never be produced from a piece are excluded when this index is built.
+    sorted_pieces: Box<[utok]>,
+    /// Index for single-byte tokens, which therefore need no extra metadata.
+    bytes: Box<[utok; 256]>,
+    /// The `<unk>` token.
+    unk: utok,
+}
+
+struct TokenMeta {
+    /// Pointer to the token's text.
+    ptr: NonNull<u8>,
+    /// Length of the text.
+    len: u32,
+    /// Merge rank of the text, starting from 0.
+    rank: u32,
+}
+
+unsafe impl Send for TokenMeta {}
+unsafe impl Sync for TokenMeta {}
+
+impl Deref for TokenMeta {
+    type Target = [u8];
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len as _) }
+    }
+}
+
+impl Bpe {
+    /// Parses a tokenizer.model file and builds a BPE tokenizer from it.
+    pub fn from_tokenizer_model(model: &[u8]) -> Self {
+        // Walk the file, marking the position of every vocabulary entry and recording the maximum length.
+        let offsets = (0..)
+            .scan(0usize, |offset, _| match &model[*offset..] {
+                [10, total_len, 10, content @ ..] => {
+                    let total_len = *total_len as usize;
+                    *offset += total_len + 2;
+                    Some(&content[..total_len - 2])
+                }
+                [..] => None,
+            })
+            .collect::<Vec<_>>();
+        // Iterator over the vocabulary words
+        let vocabs = offsets.iter().map(|slice| {
+            let len = slice[0] as usize;
+            std::str::from_utf8(&slice[1..][..len]).unwrap()
+        });
+        // Iterator over the scores
+        let scores = offsets.iter().map(|slice| {
+            let len = slice[0] as usize;
+            let ptr = slice[len + 2..].as_ptr().cast::<f32>();
+            unsafe { ptr.read_unaligned() }
+        });
+        // Iterator over the byte-token flags
+        let mut i = 0;
+        let is_byte = std::iter::from_fn(|| {
+            if i < 3 {
+                i += 1;
+                Some(false)
+            } else if i < 3 + 256 {
+                i += 1;
+                Some(true)
+            } else {
+                Some(false)
+            }
+        });
+        // Build the tokenizer
+        Self::new(vocabs, scores, is_byte, 0, offsets.len())
+    }
+
+    pub fn new<'a>(
+        vocabs: impl IntoIterator<Item = &'a str>,
+        scores: impl IntoIterator<Item = f32>,
+        is_byte: impl IntoIterator<Item = bool>,
+        unk: utok,
+        vocab_size_hint: usize,
+    ) -> Self {
+        let mut text_buf = Vec::with_capacity(vocab_size_hint * 4);
+        let mut bytes = Box::new([unk; 256]);
+        // Rearrange the vocabulary:
+        // separate each token's text from its metadata,
+        // keeping all text in text_buf for cache friendliness;
+        // each token's offset into text_buf and its length go into meta.
+        let meta = vocabs
+            .into_iter()
+            .map(str::as_bytes)
+            .zip(is_byte)
+            .enumerate()
+            .map(|(t, (piece, is_byte))| {
+                let off = text_buf.len();
+                let len = if is_byte {
+                    let b = as_byte_token(piece).unwrap();
+                    text_buf.push(b);
+                    bytes[b as usize] = t as utok;
+                    1
+                } else {
+                    text_buf.extend_from_slice(piece);
+                    piece.len()
+                };
+                (off, len)
+            })
+            .collect::<Vec<_>>();
+        // Pin the text in place so the self-reference below is sound.
+        let _vocab = unsafe { Pin::new_unchecked(text_buf.into_boxed_slice()) };
+        // Re-weight the scores into integer ranks.
+        let rank = rank(&scores.into_iter().collect::<Vec<_>>());
+        assert_eq!(
+            meta.len(),
+            rank.len(),
+            "scores size mismatch with vocab size"
+        );
+        // tokens reference positions in the text directly and carry the rank.
+        let ptr = NonNull::new(_vocab.as_ptr().cast_mut()).unwrap();
+        let tokens = zip(meta, rank)
+            .map(|((off, len), rank)| TokenMeta {
+                ptr: unsafe { ptr.add(off) },
+                len: len as _,
+                rank,
+            })
+            .collect::<Box<_>>();
+        // Sort token ids by the lexicographic order of their text, for piece -> token binary search.
+        // <unk> and <0xyz> must not be reachable through piece search, so a set excludes them.
+        let bytes_set = bytes.iter().chain(&[unk]).cloned().collect::<HashSet<_>>();
+        let mut sorted_pieces = (0..tokens.len() as utok)
+            .filter(|i| !bytes_set.contains(i))
+            .collect::<Box<_>>();
+        sorted_pieces.sort_unstable_by_key(|&i| &*tokens[i as usize]);
+
+        Self {
+            _vocab,
+            tokens,
+            sorted_pieces,
+            bytes,
+            unk,
+        }
+    }
+
+    /// Not every word in a BPE vocabulary is reachable through the merge rules.
+    /// This routine identifies the "internally unreachable" tokens.
+    pub fn inaccessible(&self) -> HashMap<&str, utok> {
+        self.sorted_pieces
+            .iter()
+            .filter_map(|&t| {
+                let s = unsafe { std::str::from_utf8_unchecked(self.token(t)) };
+                if self.encode(s).into_iter().nth(1).is_some() {
+                    Some((s, t))
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
+    /// piece -> token
+    #[inline]
+    fn find_piece(&self, piece: &[u8]) -> Option<utok> {
+        match self
+            .sorted_pieces
+            .binary_search_by_key(&piece, |&i| self.token(i))
+        {
+            Ok(i) => Some(self.sorted_pieces[i]),
+            Err(_) => match *piece {
+                [b] => Some(self.bytes[b as usize]),
+                [..] => None,
+            },
+        }
+    }
+
+    /// token id -> token meta
+    #[inline(always)]
+    fn token(&self, token: utok) -> &TokenMeta {
+        &self.tokens[token as usize]
+    }
+}
+
+impl Method for Bpe {
+    #[inline]
+    fn unk_token(&self) -> utok {
+        self.unk
+    }
+    #[inline]
+    fn vocab_size(&self) -> usize {
+        self.tokens.len()
+    }
+    #[inline]
+    fn internal_special(&self) -> impl IntoIterator<Item = (&str, utok)> {
+        self.inaccessible()
+    }
+    #[inline]
+    fn encode(&self, text: &str) -> impl IntoIterator<Item = utok> + '_ {
+        let mut tokenizer = self.build_tokenizer(text);
+        while tokenizer.merge() {}
+        tokenizer.into_iter()
+    }
+    #[inline]
+    fn decode(&self, token: utok) -> &[u8] {
+        self.token(token)
+    }
+}
+
+/// Sorts and deduplicates a set of scores, then re-weights them into an integer sequence that preserves their order.
+fn rank(scores: &[f32]) -> Vec<u32> {
+    use std::{
+        cmp::Ordering,
+        collections::{BTreeMap, BTreeSet},
+    };
+
+    #[derive(PartialEq, Debug)]
+    struct FloatOrd(f32);
+    impl Eq for FloatOrd {}
+    impl PartialOrd for FloatOrd {
+        #[inline]
+        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+            Some(self.cmp(other))
+        }
+    }
+    impl Ord for FloatOrd {
+        #[inline]
+        fn cmp(&self, other: &Self) -> Ordering {
+            self.0.total_cmp(&other.0)
+        }
+    }
+
+    let map = scores
+        // sort + dedup
+        .iter()
+        .copied()
+        .map(FloatOrd)
+        .collect::<BTreeSet<_>>()
+        // re-weight
+        .into_iter()
+        .rev()
+        .enumerate()
+        .map(|(i, f)| (f, i as u32))
+        .collect::<BTreeMap<_, _>>();
+
+    scores.iter().map(|f| map[&FloatOrd(*f)]).collect()
+}
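The private `rank` helper above maps floating-point scores to dense integer ranks: the highest score becomes rank 0, lower scores get higher ranks, and equal scores share a rank. A test along these lines could pin that behavior down; it is hypothetical and would have to live inside `src/bpe/mod.rs`, since `rank` is private:

    #[cfg(test)]
    mod tests {
        use super::rank;

        #[test]
        fn rank_is_dense_and_descending() {
            // 3.0 is the best score, so it gets rank 0; the two 0.5 entries share rank 1.
            assert_eq!(rank(&[0.5, -1.2, 0.5, 3.0]), vec![1, 2, 1, 0]);
        }
    }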
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..de10559
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,119 @@
+#![deny(warnings)]
+
+mod bpe;
+
+use regex::Regex;
+use std::collections::HashMap;
+
+pub use bpe::Bpe;
+
+/// `utok` is the token id type.
+#[allow(non_camel_case_types)]
+pub type utok = u32;
+
+pub trait Method {
+    fn unk_token(&self) -> utok;
+    fn vocab_size(&self) -> usize;
+    fn internal_special(&self) -> impl IntoIterator<Item = (&str, utok)>;
+    fn encode(&self, text: &str) -> impl IntoIterator<Item = utok> + '_;
+    fn decode(&self, token: utok) -> &[u8];
+}
+
+pub struct Tokeneer<M> {
+    method: M,
+    special: HashMap<String, Vec<utok>>,
+    special_regex: regex::Regex,
+}
+
+impl<M: Method> Tokeneer<M> {
+    pub fn new(method: M) -> Self {
+        let special = method
+            .internal_special()
+            .into_iter()
+            .map(|(k, v)| (k.to_string(), vec![v]))
+            .collect::<HashMap<_, _>>();
+        let special_regex = build_pattern(special.keys());
+        Self {
+            method,
+            special,
+            special_regex,
+        }
+    }
+
+    pub fn extend_special(&mut self, patterns: impl IntoIterator<Item = (String, Vec<utok>)>) {
+        use std::collections::hash_map::Entry::{Occupied, Vacant};
+        let mut any = false;
+        for (k, v) in patterns {
+            match self.special.entry(k) {
+                Occupied(entry) => {
+                    assert_eq!(entry.get(), &v);
+                }
+                Vacant(entry) => {
+                    entry.insert(v);
+                    any = true;
+                }
+            }
+        }
+        if any {
+            self.special_regex = build_pattern(self.special.keys());
+        }
+    }
+
+    pub fn encode(&self, text: &str) -> Vec<utok> {
+        let mut ans = Vec::new();
+        let mut start = 0;
+        for m in self.special_regex.find_iter(text) {
+            ans.extend(self.method.encode(&text[start..m.start()]));
+            ans.extend_from_slice(&self.special[m.as_str()]);
+            start = m.end();
+        }
+        ans.extend(self.method.encode(&text[start..]));
+        ans
+    }
+
+    pub fn decode(&self, tokens: &[utok]) -> String {
+        let mut ans = Vec::new();
+        for &t in tokens {
+            ans.extend_from_slice(self.method.decode(t));
+        }
+        String::from_utf8(ans).unwrap()
+    }
+    #[inline]
+    pub fn internal(&self) -> &M {
+        &self.method
+    }
+}
+
+fn build_pattern<'a, T: AsRef<str> + 'a>(text: impl IntoIterator<Item = &'a T>) -> Regex {
+    let mut pattern = String::new();
+    let mut iter = text.into_iter();
+    if let Some(p) = iter.next() {
+        pattern.push_str(p.as_ref());
+    }
+    for p in iter {
+        pattern.push('|');
+        pattern.push_str(p.as_ref());
+    }
+    regex::Regex::new(&pattern).unwrap()
+}
+
+const fn as_byte_token(piece: &[u8]) -> Option<u8> {
+    // Destructure and convert.
+    match piece {
+        &[b'<', b'0', b'x', a, b, b'>'] if a.is_ascii_hexdigit() && b.is_ascii_hexdigit() => {
+            // ASCII hex digit to number
+            #[inline(always)]
+            const fn to_num(c: u8) -> u8 {
+                match c {
+                    b'0'..=b'9' => c - b'0',
+                    b'a'..=b'f' => c - b'a' + 10,
+                    b'A'..=b'F' => c - b'A' + 10,
+                    _ => unreachable!(),
+                }
+            }
+
+            Some(to_num(a) * 16 + to_num(b))
+        }
+        _ => None,
+    }
+}
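Putting the pieces together, a minimal end-to-end use of the crate could look like the sketch below. The four-entry vocabulary, its scores, and the expectation that "a" and "b" fuse into a single "ab" token are illustrative assumptions rather than fixtures from this repository; a real vocabulary would come from `Bpe::from_tokenizer_model`, and the sketch drives `Bpe` directly through the `Method` trait rather than through `Tokeneer`.

    use tokeneer::{Bpe, Method};

    fn main() {
        // Toy vocabulary (illustrative only): token 0 is <unk>, token 3 is the merged pair "ab".
        let vocabs = ["<unk>", "a", "b", "ab"];
        let scores = [0.0f32, -1.0, -2.0, -0.5];
        let is_byte = [false; 4];
        let bpe = Bpe::new(vocabs, scores, is_byte, 0, vocabs.len());

        // `Method::encode` builds the tokenizer and runs the merge loop;
        // "a" and "b" should fuse into the single "ab" token.
        let tokens: Vec<_> = bpe.encode("ab").into_iter().collect();
        assert_eq!(tokens.len(), 1);

        // `Method::decode` maps a token id back to its bytes.
        let bytes: Vec<u8> = tokens.iter().flat_map(|&t| bpe.decode(t).to_vec()).collect();
        assert_eq!(String::from_utf8(bytes).unwrap(), "ab");
    }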