From 1c2f5d7a1e67d0f5e1f0287bedadb8b2dbc175b7 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Sat, 25 May 2024 10:56:23 -0400 Subject: [PATCH 01/19] Add automatic conversion from gguf to hf tokenizer --- mistralrs-core/src/pipeline/gguf.rs | 6 +- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 92 +++++++++++++++++++ mistralrs-core/src/pipeline/mod.rs | 1 + 3 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 mistralrs-core/src/pipeline/gguf_tokenizer.rs diff --git a/mistralrs-core/src/pipeline/gguf.rs b/mistralrs-core/src/pipeline/gguf.rs index eb3a7ac09..ae40ec16a 100644 --- a/mistralrs-core/src/pipeline/gguf.rs +++ b/mistralrs-core/src/pipeline/gguf.rs @@ -7,11 +7,11 @@ use crate::aici::bintokens::build_tok_trie; use crate::aici::toktree::TokTrie; use crate::lora::Ordering; use crate::pipeline::chat_template::calculate_eos_tokens; +use crate::pipeline::gguf_tokenizer::convert_ggml_to_hf_tokenizer; use crate::pipeline::Cache; use crate::pipeline::{ChatTemplate, LocalModelPaths}; use crate::prefix_cacher::PrefixCacheManager; use crate::sequence::Sequence; -use crate::utils::tokenizer::get_tokenizer; use crate::utils::varbuilder_utils::{from_mmaped_safetensors, load_preload_adapters}; use crate::xlora_models::NonGranularState; use crate::{deserialize_chat_template, do_sample, get_mut_arcmutex, get_paths, DeviceMapMetadata}; @@ -329,6 +329,8 @@ impl Loader for GGUFLoader { } } + let tokenizer = convert_ggml_to_hf_tokenizer(&model)?; + let mut is_lora = false; let model = match self.kind { ModelKind::QuantizedGGUF => match arch { @@ -449,8 +451,6 @@ impl Loader for GGUFLoader { _ => unreachable!(), }; - let tokenizer = get_tokenizer(paths.get_tokenizer_filename())?; - let (chat_template, gen_conf) = deserialize_chat_template!(paths, self); let max_seq_len = match model { diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs new file mode 100644 index 000000000..246c7bbd9 --- /dev/null +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -0,0 +1,92 @@ +use std::collections::HashMap; + +use anyhow::Result; +use candle_core::quantized::gguf_file::Content; +use tokenizers::{models::bpe::BpeBuilder, AddedToken, ModelWrapper, Tokenizer}; + +pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { + let model = content.metadata["tokenizer.ggml.model"] + .to_string() + .expect("GGUF tokenizer model is not a string.") + .clone(); + let tokens = content.metadata["tokenizer.ggml.tokens"] + .to_vec() + .expect("GGUF tokenizer tokens is not a vec.") + .iter() + .map(|t| t.to_string().expect("GGUF token is not a string.").clone()) + .collect::>(); + let added_tokens = content + .metadata + .get("tokenizer.ggml.added_tokens") + .map(|items| { + items + .to_vec() + .expect("GGUF tokenizer added_tokens is not a vec.") + .iter() + .map(|t| { + t.to_string() + .expect("GGUF added_token is not a string.") + .clone() + }) + .collect::>() + }); + let merges = content.metadata.get("tokenizer.ggml.merges").map(|items| { + items + .to_vec() + .expect("GGUF tokenizer merges is not a vec.") + .iter() + .map(|t| t.to_string().expect("GGUF merges is not a string.").clone()) + .collect::>() + }); + + let _bos = content.metadata["tokenizer.ggml.bos_token_id"] + .to_u32() + .expect("GGUF bos token is not u32"); + let _eos = content.metadata["tokenizer.ggml.eos_token_id"] + .to_u32() + .expect("GGUF eos token is not u32"); + let unk = content.metadata["tokenizer.ggml.unknown_token_id"] + .to_u32() + .expect("GGUF unk token is not u32"); + let 
_sep = content.metadata["tokenizer.ggml.separator_token_id"] + .to_u32() + .expect("GGUF sep token is not u32"); + let _pad = content.metadata["tokenizer.ggml.padding_token_id"] + .to_u32() + .expect("GGUF pad token is not u32"); + + let tokenizer = match model.as_str() { + "llama" | "replit" | "gpt2" | "rwkv" => { + // BPE, as seen in relevant tokenizer.json files + let bpe_builder = BpeBuilder::new().unk_token(tokens[unk as usize].clone()); + + let mut vocab = HashMap::new(); + for (i, tok) in tokens.into_iter().enumerate() { + #[allow(clippy::cast_possible_truncation)] + vocab.insert(tok, i as u32); + } + let mut merges_vec = Vec::new(); + if let Some(merges) = merges { + for tok in merges { + let split = tok.splitn(2, ' ').collect::>(); + merges_vec.push((split[0].to_string(), split[1].to_string())); + } + } + let bpe = bpe_builder + .vocab_and_merges(vocab, merges_vec) + .build() + .map_err(anyhow::Error::msg)?; + let mut tokenizer = Tokenizer::new(ModelWrapper::BPE(bpe)); + if let Some(added_tokens) = added_tokens { + for added_token in added_tokens { + tokenizer.add_special_tokens(&[AddedToken::from(added_token, true)]); + } + } + tokenizer + } + other => { + anyhow::bail!("Tokenizer model `{other}` not supported."); + } + }; + Ok(tokenizer) +} diff --git a/mistralrs-core/src/pipeline/mod.rs b/mistralrs-core/src/pipeline/mod.rs index c2c5512ff..6b61dd0ea 100644 --- a/mistralrs-core/src/pipeline/mod.rs +++ b/mistralrs-core/src/pipeline/mod.rs @@ -2,6 +2,7 @@ mod cache_manager; mod chat_template; mod ggml; mod gguf; +mod gguf_tokenizer; mod loaders; mod macros; mod normal; From b3ac5c80e3d98e5572a9e28544984733365ab4fa Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Sat, 25 May 2024 11:00:24 -0400 Subject: [PATCH 02/19] Add info messages --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 246c7bbd9..27c2cdf6a 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use anyhow::Result; use candle_core::quantized::gguf_file::Content; use tokenizers::{models::bpe::BpeBuilder, AddedToken, ModelWrapper, Tokenizer}; +use tracing::info; pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { let model = content.metadata["tokenizer.ggml.model"] @@ -39,6 +40,12 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .collect::>() }); + info!( + "Converting GGML tokenizer. 
Model: `{model}`, num tokens: {}, num added tokens: {}, num merges: {}", + tokens.len(), + added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), + merges.as_ref().map(|x| x.len()).unwrap_or(0) + ); let _bos = content.metadata["tokenizer.ggml.bos_token_id"] .to_u32() .expect("GGUF bos token is not u32"); @@ -59,6 +66,7 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { "llama" | "replit" | "gpt2" | "rwkv" => { // BPE, as seen in relevant tokenizer.json files let bpe_builder = BpeBuilder::new().unk_token(tokens[unk as usize].clone()); + info!("Loading as BPE tokenizer."); let mut vocab = HashMap::new(); for (i, tok) in tokens.into_iter().enumerate() { From 36c46cc602933b96c8627ae2bab9d9d862a9ba0d Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Sat, 25 May 2024 11:12:01 -0400 Subject: [PATCH 03/19] Add decoder to tokenizer --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 27c2cdf6a..66fdf4349 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -2,7 +2,9 @@ use std::collections::HashMap; use anyhow::Result; use candle_core::quantized::gguf_file::Content; -use tokenizers::{models::bpe::BpeBuilder, AddedToken, ModelWrapper, Tokenizer}; +use tokenizers::{ + decoders::bpe::BPEDecoder, models::bpe::BpeBuilder, AddedToken, ModelWrapper, Tokenizer, +}; use tracing::info; pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { @@ -46,21 +48,9 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), merges.as_ref().map(|x| x.len()).unwrap_or(0) ); - let _bos = content.metadata["tokenizer.ggml.bos_token_id"] - .to_u32() - .expect("GGUF bos token is not u32"); - let _eos = content.metadata["tokenizer.ggml.eos_token_id"] - .to_u32() - .expect("GGUF eos token is not u32"); let unk = content.metadata["tokenizer.ggml.unknown_token_id"] .to_u32() .expect("GGUF unk token is not u32"); - let _sep = content.metadata["tokenizer.ggml.separator_token_id"] - .to_u32() - .expect("GGUF sep token is not u32"); - let _pad = content.metadata["tokenizer.ggml.padding_token_id"] - .to_u32() - .expect("GGUF pad token is not u32"); let tokenizer = match model.as_str() { "llama" | "replit" | "gpt2" | "rwkv" => { @@ -85,6 +75,7 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .build() .map_err(anyhow::Error::msg)?; let mut tokenizer = Tokenizer::new(ModelWrapper::BPE(bpe)); + tokenizer.with_decoder(BPEDecoder::default()); if let Some(added_tokens) = added_tokens { for added_token in added_tokens { tokenizer.add_special_tokens(&[AddedToken::from(added_token, true)]); From be2fca1be34856c0e64bbb1a5f16922707d206a2 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Sat, 25 May 2024 16:47:37 -0400 Subject: [PATCH 04/19] More progress, its horrifying --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 58 +++++++++++++++++-- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 66fdf4349..01e700463 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -3,7 +3,11 @@ use std::collections::HashMap; use anyhow::Result; use candle_core::quantized::gguf_file::Content; use tokenizers::{ - decoders::bpe::BPEDecoder, 
models::bpe::BpeBuilder, AddedToken, ModelWrapper, Tokenizer, + decoders::{byte_fallback::ByteFallback, fuse::Fuse, sequence::Sequence, strip::Strip}, + models::bpe::BpeBuilder, + normalizers::{self, Prepend, Replace}, + processors::template::{self, Template, TemplateProcessing, Tokens}, + AddedToken, DecoderWrapper, ModelWrapper, NormalizerWrapper, Tokenizer, }; use tracing::info; @@ -52,6 +56,14 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .to_u32() .expect("GGUF unk token is not u32"); + let eos = content.metadata["tokenizer.ggml.eos_token_id"] + .to_u32() + .expect("GGUF unk token is not u32"); + + let bos = content.metadata["tokenizer.ggml.bos_token_id"] + .to_u32() + .expect("GGUF unk token is not u32"); + let tokenizer = match model.as_str() { "llama" | "replit" | "gpt2" | "rwkv" => { // BPE, as seen in relevant tokenizer.json files @@ -59,9 +71,9 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { info!("Loading as BPE tokenizer."); let mut vocab = HashMap::new(); - for (i, tok) in tokens.into_iter().enumerate() { + for (i, tok) in tokens.iter().enumerate() { #[allow(clippy::cast_possible_truncation)] - vocab.insert(tok, i as u32); + vocab.insert(tok.clone(), i as u32); } let mut merges_vec = Vec::new(); if let Some(merges) = merges { @@ -72,15 +84,53 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { } let bpe = bpe_builder .vocab_and_merges(vocab, merges_vec) + .fuse_unk(true) .build() .map_err(anyhow::Error::msg)?; let mut tokenizer = Tokenizer::new(ModelWrapper::BPE(bpe)); - tokenizer.with_decoder(BPEDecoder::default()); + tokenizer.with_decoder(Sequence::new(vec![ + DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), + DecoderWrapper::ByteFallback(ByteFallback::default()), + DecoderWrapper::Fuse(Fuse::new()), + DecoderWrapper::Strip(Strip::new(' ', 1, 0)), + ])); if let Some(added_tokens) = added_tokens { for added_token in added_tokens { tokenizer.add_special_tokens(&[AddedToken::from(added_token, true)]); } } + tokenizer.add_special_tokens(&[AddedToken::from(tokens[bos as usize].clone(), true)]); + tokenizer.add_special_tokens(&[AddedToken::from(tokens[eos as usize].clone(), true)]); + tokenizer.add_special_tokens(&[AddedToken::from(tokens[unk as usize].clone(), true)]); + + tokenizer.with_post_processor( + TemplateProcessing::builder() + .special_tokens(Tokens::from(vec![template::SpecialToken::new( + tokens[bos as usize].clone(), + vec![bos], + vec![tokens[bos as usize].clone()], + ) + .map_err(anyhow::Error::msg)?])) + .pair( + Template::try_from(vec![ + tokens[bos as usize].clone(), + "$A".to_string(), + tokens[bos as usize].clone(), + "$B:1".to_string(), + ]) + .unwrap(), + ) + .single( + Template::try_from(vec![tokens[bos as usize].clone(), "$A".to_string()]) + .unwrap(), + ) + .build()?, + ); + tokenizer.with_normalizer(normalizers::Sequence::new(vec![ + NormalizerWrapper::Prepend(Prepend::new("▁".to_string())), + NormalizerWrapper::Replace(Replace::new(" ", "▁").map_err(anyhow::Error::msg)?), + ])); + info!("Decoder is: {:?}", tokenizer.get_decoder()); tokenizer } other => { From ba44cca98cba1ba53e9370ca1941fee5bda9f617 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 05:19:01 -0400 Subject: [PATCH 05/19] Merge --- mistralrs-core/src/pipeline/gguf.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf.rs b/mistralrs-core/src/pipeline/gguf.rs index 5b1f1ca65..6e104836c 100644 --- a/mistralrs-core/src/pipeline/gguf.rs +++ 
b/mistralrs-core/src/pipeline/gguf.rs @@ -482,8 +482,6 @@ impl Loader for GGUFLoader { _ => unreachable!(), }; - let tokenizer = get_tokenizer(paths.get_tokenizer_filename())?; - let gen_conf: Option = paths .get_gen_conf_filename() .map(|f| serde_json::from_str(&fs::read_to_string(f).unwrap()).unwrap()); From b276c160189f8e900efd7213acb031b3061f82de Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:17:56 -0400 Subject: [PATCH 06/19] Use unigram tokenizer for llama --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 102 ++++-------------- 1 file changed, 23 insertions(+), 79 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 01e700463..697f0f320 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -1,14 +1,6 @@ -use std::collections::HashMap; - use anyhow::Result; use candle_core::quantized::gguf_file::Content; -use tokenizers::{ - decoders::{byte_fallback::ByteFallback, fuse::Fuse, sequence::Sequence, strip::Strip}, - models::bpe::BpeBuilder, - normalizers::{self, Prepend, Replace}, - processors::template::{self, Template, TemplateProcessing, Tokens}, - AddedToken, DecoderWrapper, ModelWrapper, NormalizerWrapper, Tokenizer, -}; +use tokenizers::{models::unigram::Unigram, ModelWrapper, Tokenizer}; use tracing::info; pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { @@ -37,6 +29,14 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { }) .collect::>() }); + let scores = content.metadata.get("tokenizer.ggml.scores").map(|items| { + items + .to_vec() + .expect("GGUF tokenizer scores is not a vec.") + .iter() + .map(|t| t.to_f32().expect("GGUF score is not a f32.")) + .collect::>() + }); let merges = content.metadata.get("tokenizer.ggml.merges").map(|items| { items .to_vec() @@ -47,91 +47,35 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { }); info!( - "Converting GGML tokenizer. Model: `{model}`, num tokens: {}, num added tokens: {}, num merges: {}", + "Converting GGML tokenizer. 
Model: `{model}`, num tokens: {}, num added tokens: {}, num merges: {}, num scores: {}", tokens.len(), added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), - merges.as_ref().map(|x| x.len()).unwrap_or(0) + merges.as_ref().map(|x| x.len()).unwrap_or(0), + scores.as_ref().map(|x| x.len()).unwrap_or(0) ); let unk = content.metadata["tokenizer.ggml.unknown_token_id"] .to_u32() .expect("GGUF unk token is not u32"); - let eos = content.metadata["tokenizer.ggml.eos_token_id"] + let _eos = content.metadata["tokenizer.ggml.eos_token_id"] .to_u32() .expect("GGUF unk token is not u32"); - let bos = content.metadata["tokenizer.ggml.bos_token_id"] + let _bos = content.metadata["tokenizer.ggml.bos_token_id"] .to_u32() .expect("GGUF unk token is not u32"); let tokenizer = match model.as_str() { - "llama" | "replit" | "gpt2" | "rwkv" => { - // BPE, as seen in relevant tokenizer.json files - let bpe_builder = BpeBuilder::new().unk_token(tokens[unk as usize].clone()); - info!("Loading as BPE tokenizer."); - - let mut vocab = HashMap::new(); - for (i, tok) in tokens.iter().enumerate() { - #[allow(clippy::cast_possible_truncation)] - vocab.insert(tok.clone(), i as u32); - } - let mut merges_vec = Vec::new(); - if let Some(merges) = merges { - for tok in merges { - let split = tok.splitn(2, ' ').collect::>(); - merges_vec.push((split[0].to_string(), split[1].to_string())); - } + "llama" => { + let scores = + scores.expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); + let mut vocab = Vec::new(); + for (token, score) in tokens.into_iter().zip(scores) { + vocab.push((token, score as f64)); } - let bpe = bpe_builder - .vocab_and_merges(vocab, merges_vec) - .fuse_unk(true) - .build() - .map_err(anyhow::Error::msg)?; - let mut tokenizer = Tokenizer::new(ModelWrapper::BPE(bpe)); - tokenizer.with_decoder(Sequence::new(vec![ - DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), - DecoderWrapper::ByteFallback(ByteFallback::default()), - DecoderWrapper::Fuse(Fuse::new()), - DecoderWrapper::Strip(Strip::new(' ', 1, 0)), - ])); - if let Some(added_tokens) = added_tokens { - for added_token in added_tokens { - tokenizer.add_special_tokens(&[AddedToken::from(added_token, true)]); - } - } - tokenizer.add_special_tokens(&[AddedToken::from(tokens[bos as usize].clone(), true)]); - tokenizer.add_special_tokens(&[AddedToken::from(tokens[eos as usize].clone(), true)]); - tokenizer.add_special_tokens(&[AddedToken::from(tokens[unk as usize].clone(), true)]); - - tokenizer.with_post_processor( - TemplateProcessing::builder() - .special_tokens(Tokens::from(vec![template::SpecialToken::new( - tokens[bos as usize].clone(), - vec![bos], - vec![tokens[bos as usize].clone()], - ) - .map_err(anyhow::Error::msg)?])) - .pair( - Template::try_from(vec![ - tokens[bos as usize].clone(), - "$A".to_string(), - tokens[bos as usize].clone(), - "$B:1".to_string(), - ]) - .unwrap(), - ) - .single( - Template::try_from(vec![tokens[bos as usize].clone(), "$A".to_string()]) - .unwrap(), - ) - .build()?, - ); - tokenizer.with_normalizer(normalizers::Sequence::new(vec![ - NormalizerWrapper::Prepend(Prepend::new("▁".to_string())), - NormalizerWrapper::Replace(Replace::new(" ", "▁").map_err(anyhow::Error::msg)?), - ])); - info!("Decoder is: {:?}", tokenizer.get_decoder()); - tokenizer + let unigram = + Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; + Tokenizer::new(ModelWrapper::Unigram(unigram)) } other => { anyhow::bail!("Tokenizer model `{other}` not supported."); From 
1e31df7835279c4a263b932c39aa2d3184fb75cb Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:23:01 -0400 Subject: [PATCH 07/19] Logging --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 697f0f320..783b3fb16 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -81,5 +81,6 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { anyhow::bail!("Tokenizer model `{other}` not supported."); } }; + info!("GGUF tokenizer model is `{model}`: {tokenizer:?}."); Ok(tokenizer) } From dd5a855b24dff1783fceba7b3707a603b2421b5f Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:39:56 -0400 Subject: [PATCH 08/19] Implement for llama and replit --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 783b3fb16..8e15911f9 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -1,6 +1,11 @@ use anyhow::Result; use candle_core::quantized::gguf_file::Content; -use tokenizers::{models::unigram::Unigram, ModelWrapper, Tokenizer}; +use tokenizers::{ + decoders::{byte_fallback::ByteFallback, sequence::Sequence, strip::Strip}, + models::unigram::Unigram, + normalizers::Replace, + DecoderWrapper, ModelWrapper, Tokenizer, +}; use tracing::info; pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { @@ -66,7 +71,8 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .expect("GGUF unk token is not u32"); let tokenizer = match model.as_str() { - "llama" => { + "llama" | "replit" => { + // unigram let scores = scores.expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); let mut vocab = Vec::new(); @@ -75,7 +81,13 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { } let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; - Tokenizer::new(ModelWrapper::Unigram(unigram)) + let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram)); + tokenizer.with_decoder(Sequence::new(vec![ + DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), + DecoderWrapper::ByteFallback(ByteFallback::new()), + DecoderWrapper::Strip(Strip::new(' ', 1, 0)), + ])); + tokenizer } other => { anyhow::bail!("Tokenizer model `{other}` not supported."); From d68522cf25376f1e5c7de60bf17d54b549be14dd Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:43:15 -0400 Subject: [PATCH 09/19] Better logging --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 8e15911f9..42fb9a40b 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -70,7 +70,7 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .to_u32() .expect("GGUF unk token is not u32"); - let tokenizer = match model.as_str() { + let (tokenizer, ty) = match model.as_str() { "llama" | "replit" => { // unigram let scores = @@ -87,12 +87,16 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { 
DecoderWrapper::ByteFallback(ByteFallback::new()), DecoderWrapper::Strip(Strip::new(' ', 1, 0)), ])); - tokenizer + (tokenizer, "unigram") } other => { anyhow::bail!("Tokenizer model `{other}` not supported."); } }; - info!("GGUF tokenizer model is `{model}`: {tokenizer:?}."); + info!( + "GGUF tokenizer model is `{model}`, num vocab: {}, kind: `{}`", + tokenizer.get_vocab_size(true), + ty + ); Ok(tokenizer) } From d366d2aba0fa5ae1d8c96268480ec71db7d657de Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:46:47 -0400 Subject: [PATCH 10/19] Nicer logging --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 42fb9a40b..47eaef3ec 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -51,13 +51,6 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .collect::>() }); - info!( - "Converting GGML tokenizer. Model: `{model}`, num tokens: {}, num added tokens: {}, num merges: {}, num scores: {}", - tokens.len(), - added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), - merges.as_ref().map(|x| x.len()).unwrap_or(0), - scores.as_ref().map(|x| x.len()).unwrap_or(0) - ); let unk = content.metadata["tokenizer.ggml.unknown_token_id"] .to_u32() .expect("GGUF unk token is not u32"); @@ -73,11 +66,12 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { let (tokenizer, ty) = match model.as_str() { "llama" | "replit" => { // unigram - let scores = - scores.expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); + let scores = scores + .as_ref() + .expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); let mut vocab = Vec::new(); for (token, score) in tokens.into_iter().zip(scores) { - vocab.push((token, score as f64)); + vocab.push((token, *score as f64)); } let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; @@ -94,9 +88,12 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { } }; info!( - "GGUF tokenizer model is `{model}`, num vocab: {}, kind: `{}`", + "GGUF tokenizer model is `{model}`, kind: `{}`, num tokens: {}, num added tokens: {}, num merges: {}, num scores: {}", + ty, tokenizer.get_vocab_size(true), - ty + added_tokens.as_ref().map(|x| x.len()).unwrap_or(0), + merges.as_ref().map(|x| x.len()).unwrap_or(0), + scores.as_ref().map(|x| x.len()).unwrap_or(0) ); Ok(tokenizer) } From 3d416a7ffb50434d4ac2bb2b85c07315c4d56f6a Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 07:47:30 -0400 Subject: [PATCH 11/19] Update for verbose mode --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 47eaef3ec..b693bb154 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -1,3 +1,5 @@ +use std::sync::atomic::Ordering; + use anyhow::Result; use candle_core::quantized::gguf_file::Content; use tokenizers::{ @@ -8,6 +10,8 @@ use tokenizers::{ }; use tracing::info; +use crate::DEBUG; + pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { let model = content.metadata["tokenizer.ggml.model"] .to_string() @@ -95,5 +99,8 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { 
merges.as_ref().map(|x| x.len()).unwrap_or(0), scores.as_ref().map(|x| x.len()).unwrap_or(0) ); + if DEBUG.load(Ordering::Relaxed) { + info!("Tokenizer: {tokenizer:?}"); + } Ok(tokenizer) } From 19cf0288392aa8e2587997f6a62afc3aaf045ae3 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 08:34:02 -0400 Subject: [PATCH 12/19] Allow fully local loading for gguf --- mistralrs-core/src/model_loader.rs | 6 -- mistralrs-core/src/model_selected.rs | 24 +++----- mistralrs-core/src/pipeline/gguf.rs | 18 +++--- mistralrs-core/src/pipeline/macros.rs | 83 +++++++++++++++++++++++++++ mistralrs-core/src/pipeline/mod.rs | 16 +++++- mistralrs-core/src/toml_selector.rs | 15 +++-- mistralrs-pyo3/API.md | 12 +++- mistralrs-pyo3/mistralrs.pyi | 3 - mistralrs-pyo3/src/lib.rs | 6 -- mistralrs-pyo3/src/which.rs | 3 - mistralrs/examples/quantized/main.rs | 1 - 11 files changed, 131 insertions(+), 56 deletions(-) diff --git a/mistralrs-core/src/model_loader.rs b/mistralrs-core/src/model_loader.rs index b7438b0f0..3ab381ad9 100644 --- a/mistralrs-core/src/model_loader.rs +++ b/mistralrs-core/src/model_loader.rs @@ -150,14 +150,12 @@ fn loader_from_model_selected(args: LoaderBuilder) -> anyhow::Result GGUFLoaderBuilder::new( GGUFSpecificConfig { repeat_last_n }, args.chat_template, - tokenizer_json, Some(tok_model_id), quantized_model_id, quantized_filename, @@ -165,7 +163,6 @@ fn loader_from_model_selected(args: LoaderBuilder) -> anyhow::Result anyhow::Result GGUFLoaderBuilder::new( GGUFSpecificConfig { repeat_last_n }, args.chat_template, - tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, @@ -192,7 +188,6 @@ fn loader_from_model_selected(args: LoaderBuilder) -> anyhow::Result anyhow::Result GGUFLoaderBuilder::new( GGUFSpecificConfig { repeat_last_n }, args.chat_template, - tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, diff --git a/mistralrs-core/src/model_selected.rs b/mistralrs-core/src/model_selected.rs index 6642c3f8f..1bf68939d 100644 --- a/mistralrs-core/src/model_selected.rs +++ b/mistralrs-core/src/model_selected.rs @@ -95,14 +95,12 @@ pub enum ModelSelected { /// Select a GGUF model. GGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. #[arg(short, long)] tok_model_id: String, - /// Path to local tokenizer.json file. If this is specified it is used over any remote file. - #[arg(long)] - tokenizer_json: Option, - /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. /// This may be a HF hub repo or a local path. #[arg(short = 'm', long)] @@ -119,14 +117,12 @@ pub enum ModelSelected { /// Select a GGUF model with X-LoRA. XLoraGGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. #[arg(short, long)] tok_model_id: Option, - /// Path to local tokenizer.json file. If this is specified it is used over any remote file. - #[arg(long)] - tokenizer_json: Option, - /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. 
/// This may be a HF hub repo or a local path. #[arg(short = 'm', long)] @@ -156,14 +152,12 @@ pub enum ModelSelected { /// Select a GGUF model with LoRA. LoraGGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. #[arg(short, long)] tok_model_id: Option, - /// Path to local tokenizer.json file. If this is specified it is used over any remote file. - #[arg(long)] - tokenizer_json: Option, - /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. /// This may be a HF hub repo or a local path. #[arg(short = 'm', long)] diff --git a/mistralrs-core/src/pipeline/gguf.rs b/mistralrs-core/src/pipeline/gguf.rs index 6e104836c..ae3bb9dca 100644 --- a/mistralrs-core/src/pipeline/gguf.rs +++ b/mistralrs-core/src/pipeline/gguf.rs @@ -14,7 +14,7 @@ use crate::prefix_cacher::PrefixCacheManager; use crate::sequence::Sequence; use crate::utils::varbuilder_utils::{from_mmaped_safetensors, load_preload_adapters}; use crate::xlora_models::NonGranularState; -use crate::{do_sample, get_mut_arcmutex, get_paths, DeviceMapMetadata, DEBUG}; +use crate::{do_sample, get_mut_arcmutex, get_paths_gguf, DeviceMapMetadata, DEBUG}; use crate::{ models::quantized_llama::ModelWeights as QLlama, models::quantized_phi2::ModelWeights as QPhi, @@ -69,7 +69,6 @@ pub struct GGUFLoader { xlora_order: Option, no_kv_cache: bool, chat_template: Option, - tokenizer_json: Option, kind: ModelKind, tgt_non_granular_index: Option, } @@ -119,24 +118,24 @@ pub struct GGUFLoaderBuilder { xlora_order: Option, no_kv_cache: bool, chat_template: Option, - tokenizer_json: Option, tgt_non_granular_index: Option, } impl GGUFLoaderBuilder { + /// Create a loader builder for a GGUF model. `tok_model_id` is the model ID where you can find a + /// `tokenizer_config.json` file. If the `chat_template` is specified, then it will be treated as a + /// path and used over remote files, removing all remote accesses. 
pub fn new( config: GGUFSpecificConfig, chat_template: Option, - tokenizer_json: Option, - model_id: Option, + tok_model_id: Option, quantized_model_id: String, quantized_filename: String, ) -> Self { Self { config, chat_template, - tokenizer_json, - model_id, + model_id: tok_model_id, kind: ModelKind::QuantizedGGUF, quantized_filename, quantized_model_id, @@ -197,7 +196,6 @@ impl GGUFLoaderBuilder { xlora_order: self.xlora_order, no_kv_cache: self.no_kv_cache, chat_template: self.chat_template, - tokenizer_json: self.tokenizer_json, tgt_non_granular_index: self.tgt_non_granular_index, quantized_filename: Some(self.quantized_filename), quantized_model_id: Some(self.quantized_model_id), @@ -217,7 +215,6 @@ impl GGUFLoader { xlora_order: Option, no_kv_cache: bool, chat_template: Option, - tokenizer_json: Option, tgt_non_granular_index: Option, ) -> Self { let model_id = if let Some(id) = model_id { @@ -238,7 +235,6 @@ impl GGUFLoader { xlora_order, no_kv_cache, chat_template, - tokenizer_json, kind, tgt_non_granular_index, } @@ -279,7 +275,7 @@ impl Loader for GGUFLoader { mapper: DeviceMapMetadata, in_situ_quant: Option, ) -> Result>> { - let paths: anyhow::Result> = get_paths!( + let paths: anyhow::Result> = get_paths_gguf!( LocalModelPaths, &token_source, revision, diff --git a/mistralrs-core/src/pipeline/macros.rs b/mistralrs-core/src/pipeline/macros.rs index 25068ccad..7f8f663d5 100644 --- a/mistralrs-core/src/pipeline/macros.rs +++ b/mistralrs-core/src/pipeline/macros.rs @@ -138,6 +138,89 @@ macro_rules! get_paths { }}; } +#[macro_export] +macro_rules! get_paths_gguf { + ($path_name:ident, $token_source:expr, $revision:expr, $this:expr, $quantized_model_id:expr, $quantized_filename:expr, $silent:expr) => {{ + let api = ApiBuilder::new() + .with_progress(!$silent) + .with_token(get_token($token_source)?) + .build()?; + let revision = $revision.unwrap_or("main".to_string()); + let api = api.repo(Repo::with_revision( + $this.model_id.clone(), + RepoType::Model, + revision.clone(), + )); + let model_id = std::path::Path::new(&$this.model_id); + + let chat_template = if let Some(ref p) = $this.chat_template { + if p.ends_with(".json") { + info!("Using chat template file at `{p}`"); + PathBuf::from_str(p)? + } else { + PathBuf::from_str("")? + } + } else { + $crate::api_get_file!( + api, + "tokenizer_config.json", + model_id + ) // Will be loaded from inside gguf file + }; + + let filenames = get_model_paths( + revision.clone(), + &$token_source, + &$quantized_model_id, + &$quantized_filename, + &api, + &model_id, + )?; + + let XLoraPaths { + adapter_configs, + adapter_safetensors, + classifier_path, + xlora_order, + xlora_config, + lora_preload_adapter_info, + } = get_xlora_paths( + $this.model_id.clone(), + &$this.xlora_model_id, + &$token_source, + revision.clone(), + &$this.xlora_order, + )?; + + let gen_conf = if $crate::api_dir_list!(api, model_id) + .collect::>() + .contains(&"generation_config.json".to_string()) + { + Some($crate::api_get_file!( + api, + "generation_config.json", + model_id + )) + } else { + None + }; + + Ok(Box::new($path_name { + tokenizer_filename: PathBuf::from_str("")?, + config_filename: PathBuf::from_str("")?, + filenames, + xlora_adapter_configs: adapter_configs, + xlora_adapter_filenames: adapter_safetensors, + classifier_path, + classifier_config: xlora_config, + xlora_ordering: xlora_order, + template_filename: chat_template, + gen_conf, + lora_preload_adapter_info, + })) + }}; +} + #[macro_export] macro_rules! 
normal_model_loader { ($paths:expr, $dtype:expr, $default_dtype:expr, $device:expr, $config:expr, $loader:expr, $use_flash_attn:expr, $silent:expr, $mapper:expr, $loading_isq:expr, $real_device:expr) => {{ diff --git a/mistralrs-core/src/pipeline/mod.rs b/mistralrs-core/src/pipeline/mod.rs index 68b94bb08..d06b91af7 100644 --- a/mistralrs-core/src/pipeline/mod.rs +++ b/mistralrs-core/src/pipeline/mod.rs @@ -1298,8 +1298,20 @@ pub(crate) fn get_chat_template( paths: &Box, chat_template: &Option, ) -> ChatTemplate { + let template_filename = if paths.get_template_filename().to_string_lossy().is_empty() { + PathBuf::from( + chat_template + .as_ref() + .expect("A tokenizer config or chat template file path must be specified."), + ) + } else { + paths.get_template_filename().clone() + }; + if !template_filename.ends_with(".json") { + panic!("Template filename {template_filename:?} must end with `.json`."); + } let template: ChatTemplate = - serde_json::from_str(&fs::read_to_string(paths.get_template_filename()).unwrap()).unwrap(); + serde_json::from_str(&fs::read_to_string(&template_filename).unwrap()).unwrap(); #[derive(Debug, serde::Deserialize)] struct SpecifiedTemplate { @@ -1314,7 +1326,7 @@ pub(crate) fn get_chat_template( info!("`tokenizer_config.json` does not contain a chat template, attempting to use specified JINJA chat template."); let mut deser: HashMap = - serde_json::from_str(&fs::read_to_string(paths.get_template_filename()).unwrap()).unwrap(); + serde_json::from_str(&fs::read_to_string(&template_filename).unwrap()).unwrap(); match chat_template.clone() { Some(t) => { diff --git a/mistralrs-core/src/toml_selector.rs b/mistralrs-core/src/toml_selector.rs index 5bf67276c..478d940eb 100644 --- a/mistralrs-core/src/toml_selector.rs +++ b/mistralrs-core/src/toml_selector.rs @@ -65,7 +65,9 @@ enum TomlModelSelected { /// Select a GGUF model. #[allow(clippy::upper_case_acronyms)] GGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. tok_model_id: String, /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. @@ -78,7 +80,9 @@ enum TomlModelSelected { /// Select a GGUF model with X-LoRA. XLoraGGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. tok_model_id: Option, /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. @@ -101,7 +105,9 @@ enum TomlModelSelected { /// Select a GGUF model with LoRA. LoraGGUF { - /// Model ID to load the tokenizer from. This may be a HF hub repo or a local path. + /// `tok_model_id` is the local or remote model ID where you can find a `tokenizer_config.json` file. + /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, + /// removing all remote accesses. tok_model_id: Option, /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. 
@@ -299,7 +305,6 @@ fn loader_from_selected( repeat_last_n: args.repeat_last_n, }, args.chat_template, - args.tokenizer_json, Some(tok_model_id), quantized_model_id, quantized_filename, @@ -317,7 +322,6 @@ fn loader_from_selected( repeat_last_n: args.repeat_last_n, }, args.chat_template, - args.tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, @@ -343,7 +347,6 @@ fn loader_from_selected( repeat_last_n: args.repeat_last_n, }, args.chat_template, - args.tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, diff --git a/mistralrs-pyo3/API.md b/mistralrs-pyo3/API.md index 7d0387348..359ac00e8 100644 --- a/mistralrs-pyo3/API.md +++ b/mistralrs-pyo3/API.md @@ -22,11 +22,13 @@ Additionally, for models without quantization, the model architecture should be ```py class Which(Enum): + @dataclass class Plain: model_id: str arch: Architecture tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class XLora: arch: Architecture xlora_model_id: str @@ -35,6 +37,7 @@ class Which(Enum): model_id: str | None = None tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class Lora: arch: Architecture adapters_model_id: str @@ -42,12 +45,13 @@ class Which(Enum): model_id: str | None = None tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class GGUF: tok_model_id: str quantized_model_id: str quantized_filename: str - tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class XLoraGGUF: tok_model_id: str quantized_model_id: str @@ -55,22 +59,23 @@ class Which(Enum): xlora_model_id: str order: str tgt_non_granular_index: int | None = None - tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class LoraGGUF: tok_model_id: str quantized_model_id: str quantized_filename: str adapters_model_id: str order: str - tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class GGML: tok_model_id: str quantized_model_id: str quantized_filename: str tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class XLoraGGML: tok_model_id: str quantized_model_id: str @@ -80,6 +85,7 @@ class Which(Enum): tgt_non_granular_index: int | None = None tokenizer_json: str | None = None repeat_last_n: int = 64 + @dataclass class LoraGGML: tok_model_id: str quantized_model_id: str diff --git a/mistralrs-pyo3/mistralrs.pyi b/mistralrs-pyo3/mistralrs.pyi index a1239a557..f1d7c46c7 100644 --- a/mistralrs-pyo3/mistralrs.pyi +++ b/mistralrs-pyo3/mistralrs.pyi @@ -96,7 +96,6 @@ class Which(Enum): tok_model_id: str quantized_model_id: str quantized_filename: str - tokenizer_json: str | None = None repeat_last_n: int = 64 @dataclass class XLoraGGUF: @@ -106,7 +105,6 @@ class Which(Enum): xlora_model_id: str order: str tgt_non_granular_index: int | None = None - tokenizer_json: str | None = None repeat_last_n: int = 64 @dataclass class LoraGGUF: @@ -115,7 +113,6 @@ class Which(Enum): quantized_filename: str adapters_model_id: str order: str - tokenizer_json: str | None = None repeat_last_n: int = 64 @dataclass class GGML: diff --git a/mistralrs-pyo3/src/lib.rs b/mistralrs-pyo3/src/lib.rs index b1e5f8a83..aa61d5d7f 100644 --- a/mistralrs-pyo3/src/lib.rs +++ b/mistralrs-pyo3/src/lib.rs @@ -167,7 +167,6 @@ fn parse_which( .build(arch.into()), Which::GGUF { tok_model_id, - tokenizer_json, quantized_model_id, quantized_filename, repeat_last_n, @@ -176,7 +175,6 @@ fn parse_which( repeat_last_n: repeat_last_n.unwrap_or(REPEAT_LAST_N_DEFAULT), }, chat_template, - tokenizer_json, Some(tok_model_id), 
quantized_model_id, quantized_filename, @@ -184,7 +182,6 @@ fn parse_which( .build(), Which::XLoraGGUF { tok_model_id, - tokenizer_json, quantized_model_id, quantized_filename, repeat_last_n, @@ -196,7 +193,6 @@ fn parse_which( repeat_last_n: repeat_last_n.unwrap_or(REPEAT_LAST_N_DEFAULT), }, chat_template, - tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, @@ -214,7 +210,6 @@ fn parse_which( .build(), Which::LoraGGUF { tok_model_id, - tokenizer_json, quantized_model_id, quantized_filename, repeat_last_n, @@ -225,7 +220,6 @@ fn parse_which( repeat_last_n: repeat_last_n.unwrap_or(REPEAT_LAST_N_DEFAULT), }, chat_template, - tokenizer_json, tok_model_id, quantized_model_id, quantized_filename, diff --git a/mistralrs-pyo3/src/which.rs b/mistralrs-pyo3/src/which.rs index f7def2cfb..98bce20d8 100644 --- a/mistralrs-pyo3/src/which.rs +++ b/mistralrs-pyo3/src/which.rs @@ -57,7 +57,6 @@ pub enum Which { #[allow(clippy::upper_case_acronyms)] GGUF { tok_model_id: String, - tokenizer_json: Option, quantized_model_id: String, quantized_filename: String, repeat_last_n: Option, @@ -65,7 +64,6 @@ pub enum Which { XLoraGGUF { tok_model_id: Option, - tokenizer_json: Option, quantized_model_id: String, quantized_filename: String, repeat_last_n: Option, @@ -76,7 +74,6 @@ pub enum Which { LoraGGUF { tok_model_id: Option, - tokenizer_json: Option, quantized_model_id: String, quantized_filename: String, repeat_last_n: Option, diff --git a/mistralrs/examples/quantized/main.rs b/mistralrs/examples/quantized/main.rs index 37f60ef01..58f1ac92b 100644 --- a/mistralrs/examples/quantized/main.rs +++ b/mistralrs/examples/quantized/main.rs @@ -12,7 +12,6 @@ fn setup() -> anyhow::Result> { let loader = GGUFLoaderBuilder::new( GGUFSpecificConfig { repeat_last_n: 64 }, None, - None, Some("mistralai/Mistral-7B-Instruct-v0.1".to_string()), "TheBloke/Mistral-7B-Instruct-v0.1-GGUF".to_string(), "mistral-7b-instruct-v0.1.Q4_K_M.gguf".to_string(), From d8831239b62d93597e234a2ef9ab2e9c6ebd5ab0 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 08:38:28 -0400 Subject: [PATCH 13/19] Update docs for loading --- README.md | 10 ++++++---- mistralrs-core/src/model_loader.rs | 2 +- mistralrs-core/src/model_selected.rs | 2 +- mistralrs-pyo3/src/lib.rs | 2 +- mistralrs-pyo3/src/which.rs | 2 +- 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 256e5d369..6bc804e6b 100644 --- a/README.md +++ b/README.md @@ -240,16 +240,18 @@ This is passed in the following ways: If token cannot be loaded, no token will be used (i.e. effectively using `none`). -## Loading models from local files:** +## Loading models from local files: -You can also instruct mistral.rs to load models locally by modifying the `*_model_id` arguments or options: +You can also instruct mistral.rs to load models fully locally by modifying the `*_model_id` arguments or options: ```bash ./mistralrs_server --port 1234 plain -m . -a mistral ``` -or + +To run GGUF models fully locally, you do not need to specify the tokenizer model ID argument and instead should pass a path to the +chat template JSON file (examples [here](chat_templates)) as well as specifying a local model ID. For example: ```bash -./mistralrs-server gguf -m . -t . -f Phi-3-mini-128k-instruct-q4_K_M.gguf +./mistralrs-server --chat-template gguf -m . 
-f Phi-3-mini-128k-instruct-q4_K_M.gguf ``` Throughout mistral.rs, any model ID argument or option may be a local path and should contain the following files for each model ID option: diff --git a/mistralrs-core/src/model_loader.rs b/mistralrs-core/src/model_loader.rs index 3ab381ad9..3d61eb62c 100644 --- a/mistralrs-core/src/model_loader.rs +++ b/mistralrs-core/src/model_loader.rs @@ -156,7 +156,7 @@ fn loader_from_model_selected(args: LoaderBuilder) -> anyhow::Result GGUFLoaderBuilder::new( GGUFSpecificConfig { repeat_last_n }, args.chat_template, - Some(tok_model_id), + tok_model_id, quantized_model_id, quantized_filename, ) diff --git a/mistralrs-core/src/model_selected.rs b/mistralrs-core/src/model_selected.rs index 1bf68939d..a9ed08e5d 100644 --- a/mistralrs-core/src/model_selected.rs +++ b/mistralrs-core/src/model_selected.rs @@ -99,7 +99,7 @@ pub enum ModelSelected { /// If the `chat_template` is specified, then it will be treated as a path and used over remote files, /// removing all remote accesses. #[arg(short, long)] - tok_model_id: String, + tok_model_id: Option, /// Quantized model ID to find the `quantized_filename`, only applicable if `quantized` is set. /// This may be a HF hub repo or a local path. diff --git a/mistralrs-pyo3/src/lib.rs b/mistralrs-pyo3/src/lib.rs index aa61d5d7f..ae0ec9d3b 100644 --- a/mistralrs-pyo3/src/lib.rs +++ b/mistralrs-pyo3/src/lib.rs @@ -175,7 +175,7 @@ fn parse_which( repeat_last_n: repeat_last_n.unwrap_or(REPEAT_LAST_N_DEFAULT), }, chat_template, - Some(tok_model_id), + tok_model_id, quantized_model_id, quantized_filename, ) diff --git a/mistralrs-pyo3/src/which.rs b/mistralrs-pyo3/src/which.rs index 98bce20d8..a5a33a612 100644 --- a/mistralrs-pyo3/src/which.rs +++ b/mistralrs-pyo3/src/which.rs @@ -56,7 +56,7 @@ pub enum Which { #[allow(clippy::upper_case_acronyms)] GGUF { - tok_model_id: String, + tok_model_id: Option, quantized_model_id: String, quantized_filename: String, repeat_last_n: Option, From bf308d476e13e3ca5f80e9dcf0af38f7c0cca905 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 12:46:30 -0400 Subject: [PATCH 14/19] Fix extension checking --- mistralrs-core/src/pipeline/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mistralrs-core/src/pipeline/mod.rs b/mistralrs-core/src/pipeline/mod.rs index d06b91af7..9d7dbee83 100644 --- a/mistralrs-core/src/pipeline/mod.rs +++ b/mistralrs-core/src/pipeline/mod.rs @@ -1307,7 +1307,12 @@ pub(crate) fn get_chat_template( } else { paths.get_template_filename().clone() }; - if !template_filename.ends_with(".json") { + if template_filename + .extension() + .expect("Template filename must be a file") + .to_string_lossy() + != "json" + { panic!("Template filename {template_filename:?} must end with `.json`."); } let template: ChatTemplate = From ec4ccb9ac31e8240e301aaf0321566b80555425f Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 14:07:46 -0400 Subject: [PATCH 15/19] Add some tests --- mistralrs-core/Cargo.toml | 1 + mistralrs-core/src/pipeline/gguf_tokenizer.rs | 165 +++++++++++++++++- mistralrs-core/src/sampler.rs | 6 +- 3 files changed, 159 insertions(+), 13 deletions(-) diff --git a/mistralrs-core/Cargo.toml b/mistralrs-core/Cargo.toml index 2ba047587..9d4c9c199 100644 --- a/mistralrs-core/Cargo.toml +++ b/mistralrs-core/Cargo.toml @@ -56,6 +56,7 @@ toml = "0.8.12" strum = { version = "0.26", features = ["derive"] } derive_more = { version = "0.99.17", default-features = false, features = ["from"] } tracing-subscriber.workspace 
= true +reqwest = { version = "0.12.4", features = ["blocking"] } [features] pyo3_macros = ["pyo3"] diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index b693bb154..3bb97fa9b 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -3,10 +3,10 @@ use std::sync::atomic::Ordering; use anyhow::Result; use candle_core::quantized::gguf_file::Content; use tokenizers::{ - decoders::{byte_fallback::ByteFallback, sequence::Sequence, strip::Strip}, + decoders::{self, byte_fallback::ByteFallback, fuse::Fuse, strip::Strip}, models::unigram::Unigram, - normalizers::Replace, - DecoderWrapper, ModelWrapper, Tokenizer, + normalizers::{self, Prepend, Replace}, + AddedToken, DecoderWrapper, ModelWrapper, NormalizerWrapper, Tokenizer, }; use tracing::info; @@ -59,11 +59,11 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .to_u32() .expect("GGUF unk token is not u32"); - let _eos = content.metadata["tokenizer.ggml.eos_token_id"] + let eos = content.metadata["tokenizer.ggml.eos_token_id"] .to_u32() .expect("GGUF unk token is not u32"); - let _bos = content.metadata["tokenizer.ggml.bos_token_id"] + let bos = content.metadata["tokenizer.ggml.bos_token_id"] .to_u32() .expect("GGUF unk token is not u32"); @@ -74,17 +74,27 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { .as_ref() .expect("Expect `tokenizer.ggml.scores` for `llama` unigram tokeizer."); let mut vocab = Vec::new(); - for (token, score) in tokens.into_iter().zip(scores) { - vocab.push((token, *score as f64)); + for (token, score) in tokens.iter().zip(scores) { + vocab.push((token.clone(), *score as f64)); } let unigram = Unigram::from(vocab, Some(unk as usize), true).map_err(anyhow::Error::msg)?; let mut tokenizer = Tokenizer::new(ModelWrapper::Unigram(unigram)); - tokenizer.with_decoder(Sequence::new(vec![ + tokenizer.with_decoder(decoders::sequence::Sequence::new(vec![ DecoderWrapper::Replace(Replace::new("▁", " ").map_err(anyhow::Error::msg)?), DecoderWrapper::ByteFallback(ByteFallback::new()), + DecoderWrapper::Fuse(Fuse::new()), DecoderWrapper::Strip(Strip::new(' ', 1, 0)), ])); + tokenizer.with_normalizer(normalizers::Sequence::new(vec![ + NormalizerWrapper::Prepend(Prepend::new("▁".to_string())), + NormalizerWrapper::Replace(Replace::new(" ", "▁").map_err(anyhow::Error::msg)?), + ])); + + tokenizer.add_special_tokens(&[AddedToken::from(tokens[bos as usize].clone(), true)]); + tokenizer.add_special_tokens(&[AddedToken::from(tokens[eos as usize].clone(), true)]); + tokenizer.add_special_tokens(&[AddedToken::from(tokens[unk as usize].clone(), true)]); + (tokenizer, "unigram") } other => { @@ -104,3 +114,142 @@ pub fn convert_ggml_to_hf_tokenizer(content: &Content) -> Result { } Ok(tokenizer) } + +mod tests { + use anyhow::Result; + use candle_core::quantized::gguf_file::Content; + use hf_hub::{api::sync::ApiBuilder, Repo, RepoType}; + use tokenizers::Tokenizer; + + use super::convert_ggml_to_hf_tokenizer; + + #[allow(dead_code)] + #[derive(Debug)] + enum TokenizerType { + /// Mistral v0.1 tokenizer + Llama, + Replit, + Gpt2, + Rwkv, + } + + #[allow(dead_code)] + fn get_gguf_tokenizer(tokenizer: TokenizerType) -> Result { + match tokenizer { + TokenizerType::Llama => { + let api = ApiBuilder::new().with_progress(true).build().unwrap(); + let api = api.repo(Repo::with_revision( + "TheBloke/Mistral-7B-Instruct-v0.1-GGUF".to_string(), + RepoType::Model, + "main".to_string(), + )); + + let 
filename = api.get("mistral-7b-instruct-v0.1.Q2_K.gguf").unwrap(); + let mut file = std::fs::File::open(&filename)?; + convert_ggml_to_hf_tokenizer( + &Content::read(&mut file) + .map_err(|e| e.with_path(filename)) + .map_err(anyhow::Error::msg)?, + ) + .map_err(anyhow::Error::msg) + } + other => anyhow::bail!("Cannot get testing HF tokenizer for type {other:?}"), + } + } + + #[allow(dead_code)] + fn get_hf_tokenizer(tokenizer: TokenizerType) -> Result { + match tokenizer { + TokenizerType::Llama => { + let api = ApiBuilder::new().with_progress(true).build().unwrap(); + let api = api.repo(Repo::with_revision( + "EricB/mistralrs_tests".to_string(), + RepoType::Model, + "main".to_string(), + )); + + let tokenizer_filename = api.get("tokenizer.json").unwrap(); + Ok(Tokenizer::from_file(tokenizer_filename).unwrap()) + } + other => anyhow::bail!("Cannot get testing HF tokenizer for type {other:?}"), + } + } + + #[allow(dead_code)] + fn get_test_passage() -> String { + let passage = reqwest::blocking::get("https://loripsum.net/api") + .expect("Failed to download sample text") + .bytes() + .expect("Failed to get bytes"); + String::from_utf8(passage.to_vec()).expect("Failed to convert sample text to string.") + } + + #[test] + fn test_encode_llama() -> Result<()> { + let passage = get_test_passage(); + let hf_tokenizer = get_hf_tokenizer(TokenizerType::Llama)?; + let gguf_tokenizer = get_gguf_tokenizer(TokenizerType::Llama)?; + + // Without special tokens + let hf_tokenized = hf_tokenizer + .encode(passage.as_str(), false) + .map_err(anyhow::Error::msg)?; + let gguf_tokenized = gguf_tokenizer + .encode(passage.as_str(), false) + .map_err(anyhow::Error::msg)?; + let hf_decoded = hf_tokenizer + .decode(hf_tokenized.get_ids(), false) + .map_err(anyhow::Error::msg)?; + let gguf_decoded = gguf_tokenizer + .decode(gguf_tokenized.get_ids(), false) + .map_err(anyhow::Error::msg)?; + assert_eq!(hf_decoded, gguf_decoded); + + // With special tokens + let hf_tokenized = hf_tokenizer + .encode(passage.as_str(), true) + .map_err(anyhow::Error::msg)?; + let gguf_tokenized = gguf_tokenizer + .encode(passage.as_str(), true) + .map_err(anyhow::Error::msg)?; + let hf_decoded = hf_tokenizer + .decode(hf_tokenized.get_ids(), true) + .map_err(anyhow::Error::msg)?; + let gguf_decoded = gguf_tokenizer + .decode(gguf_tokenized.get_ids(), true) + .map_err(anyhow::Error::msg)?; + assert_eq!(hf_decoded, gguf_decoded); + Ok(()) + } + + #[test] + fn test_decode() -> Result<()> { + use rand::seq::SliceRandom; + use rand::thread_rng; + + let hf_tokenizer = get_hf_tokenizer(TokenizerType::Llama)?; + let gguf_tokenizer = get_gguf_tokenizer(TokenizerType::Llama)?; + + let mut tokens = (0..hf_tokenizer.get_vocab_size(false) as u32).collect::>(); + tokens.shuffle(&mut thread_rng()); + + // Without skipping special tokens + let hf_decoded = hf_tokenizer + .decode(&tokens, false) + .map_err(anyhow::Error::msg)?; + let gguf_decoded = gguf_tokenizer + .decode(&tokens, false) + .map_err(anyhow::Error::msg)?; + assert_eq!(hf_decoded, gguf_decoded); + + // With skipping special tokens + let hf_decoded = hf_tokenizer + .decode(&tokens, true) + .map_err(anyhow::Error::msg)?; + let gguf_decoded = gguf_tokenizer + .decode(&tokens, true) + .map_err(anyhow::Error::msg)?; + assert_eq!(hf_decoded, gguf_decoded); + Ok(()) + } +} diff --git a/mistralrs-core/src/sampler.rs b/mistralrs-core/src/sampler.rs index 520b139f0..a8da56c10 100644 --- a/mistralrs-core/src/sampler.rs +++ b/mistralrs-core/src/sampler.rs @@ -413,11 +413,7 @@ mod tests { 
#[allow(dead_code)] fn get_tokenizer() -> Tokenizer { - let api = ApiBuilder::new() - .with_progress(true) - .with_token(Some(std::env::var("TESTS_HF_TOKEN").unwrap())) - .build() - .unwrap(); + let api = ApiBuilder::new().with_progress(true).build().unwrap(); let api = api.repo(Repo::with_revision( "EricB/mistralrs_tests".to_string(), RepoType::Model, From e0551d3fdca1704fbdf6372565666af3e06b1427 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 14:21:06 -0400 Subject: [PATCH 16/19] Update test --- mistralrs-core/src/pipeline/gguf_tokenizer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 3bb97fa9b..5d6e64498 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -230,6 +230,7 @@ mod tests { let hf_tokenizer = get_hf_tokenizer(TokenizerType::Llama)?; let gguf_tokenizer = get_gguf_tokenizer(TokenizerType::Llama)?; + #[allow(clippy::cast_possible_truncation)] let mut tokens = (0..hf_tokenizer.get_vocab_size(false) as u32).collect::>(); tokens.shuffle(&mut thread_rng()); From 6c832d15dc70202736b88e060108080f79bce972 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 14:27:25 -0400 Subject: [PATCH 17/19] Update docs --- README.md | 22 +++++++++++++------ mistralrs-core/src/pipeline/gguf_tokenizer.rs | 2 +- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 6bc804e6b..55edf1342 100644 --- a/README.md +++ b/README.md @@ -247,13 +247,6 @@ You can also instruct mistral.rs to load models fully locally by modifying the ` ./mistralrs_server --port 1234 plain -m . -a mistral ``` -To run GGUF models fully locally, you do not need to specify the tokenizer model ID argument and instead should pass a path to the -chat template JSON file (examples [here](chat_templates)) as well as specifying a local model ID. For example: - -```bash -./mistralrs-server --chat-template gguf -m . -f Phi-3-mini-128k-instruct-q4_K_M.gguf -``` - Throughout mistral.rs, any model ID argument or option may be a local path and should contain the following files for each model ID option: - `--model-id` (server) or `model_id` (python/rust) or `--tok-model-id` (server) or `tok_model_id` (python/rust): - `config.json` @@ -269,6 +262,21 @@ Throughout mistral.rs, any model ID argument or option may be a local path and s - `--adapters-model-id` (server) or `adapters_model_id` (python/rust): - Adapters `.safetensors` and `adapter_config.json` files in their respective directories +## Running GGUF models locally + +To run GGUF models fully locally, you do not need to specify the tokenizer model ID argument and instead should pass a path to the +chat template JSON file (examples [here](chat_templates)) as well as specifying a local model ID. For example: + +```bash +./mistralrs-server --chat-template gguf -m . -f Phi-3-mini-128k-instruct-q4_K_M.gguf +``` + +The following tokenizer model types are currently supported. If you would like one to be added, please raise an issue. Otherwise, +please consider using the method demonstrated in examples below, where the tokenizer is sourced from Hugging Face. 
+ +**Supported GGUF tokenizer types** +- `llama` + ### Run To start a server serving Mistral GGUF on `localhost:1234`, diff --git a/mistralrs-core/src/pipeline/gguf_tokenizer.rs b/mistralrs-core/src/pipeline/gguf_tokenizer.rs index 5d6e64498..1a8333616 100644 --- a/mistralrs-core/src/pipeline/gguf_tokenizer.rs +++ b/mistralrs-core/src/pipeline/gguf_tokenizer.rs @@ -223,7 +223,7 @@ mod tests { } #[test] - fn test_decode() -> Result<()> { + fn test_decode_llama() -> Result<()> { use rand::seq::SliceRandom; use rand::thread_rng; From 30055fff8aacbc402284482b391b995ce04c61e0 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 14:38:52 -0400 Subject: [PATCH 18/19] Update readme --- README.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 55edf1342..a75eb5279 100644 --- a/README.md +++ b/README.md @@ -155,7 +155,7 @@ Please submit more benchmarks via raising an issue! ## Usage ### Installation and Build -To install mistral.rs, one should ensure they have Rust installed by following [this](https://rustup.rs/) link. Additionally, the Hugging Face token should be provided in `~/.cache/huggingface/token` when using the server to enable automatic download of gated models. +To install mistral.rs, one should ensure they have Rust installed by following [this](https://rustup.rs/) link. Additionally, the Hugging Face token should be provided in `~/.cache/huggingface/token` by running `huggingface-cli login` to enable automatic download of gated models. 1) Install required packages - `openssl` (ex., `sudo apt install libssl-dev`) @@ -169,9 +169,7 @@ To install mistral.rs, one should ensure they have Rust installed by following [ 3) Set HF token correctly (skip if already set or your model is not gated, or if you want to use the `token_source` parameters in Python or the command line.) ```bash - mkdir ~/.cache/huggingface - touch ~/.cache/huggingface/token - echo > ~/.cache/huggingface/token + huggingface-cli login ``` 4) Download the code @@ -220,6 +218,7 @@ To install mistral.rs, one should ensure they have Rust installed by following [ You can install Python support by following the guide [here](mistralrs-pyo3/README.md). +## Getting models ### Getting models from HF Hub Mistral.rs can automatically download models from HF Hub. To access gated models, you should provide a token source. They may be one of: @@ -240,7 +239,7 @@ This is passed in the following ways: If token cannot be loaded, no token will be used (i.e. effectively using `none`). -## Loading models from local files: +### Loading models from local files: You can also instruct mistral.rs to load models fully locally by modifying the `*_model_id` arguments or options: ```bash @@ -262,10 +261,10 @@ Throughout mistral.rs, any model ID argument or option may be a local path and s - `--adapters-model-id` (server) or `adapters_model_id` (python/rust): - Adapters `.safetensors` and `adapter_config.json` files in their respective directories -## Running GGUF models locally +### Running GGUF models locally To run GGUF models fully locally, you do not need to specify the tokenizer model ID argument and instead should pass a path to the -chat template JSON file (examples [here](chat_templates)) as well as specifying a local model ID. For example: +chat template JSON file (examples [here](chat_templates), you will need to create your own by specifying the chat template and `bos`/`eos` tokens) as well as specifying a local model ID. 
For example: ```bash ./mistralrs-server --chat-template gguf -m . -f Phi-3-mini-128k-instruct-q4_K_M.gguf @@ -277,7 +276,7 @@ please consider using the method demonstrated in examples below, where the token **Supported GGUF tokenizer types** - `llama` -### Run +## Run To start a server serving Mistral GGUF on `localhost:1234`, ```bash From c374297fa6f087d0ef3b4cee94c3a95504843fa6 Mon Sep 17 00:00:00 2001 From: EricLBuehler Date: Tue, 28 May 2024 14:41:48 -0400 Subject: [PATCH 19/19] Update readme --- README.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a75eb5279..80c2b16c5 100644 --- a/README.md +++ b/README.md @@ -219,7 +219,12 @@ To install mistral.rs, one should ensure they have Rust installed by following [ You can install Python support by following the guide [here](mistralrs-pyo3/README.md). ## Getting models -### Getting models from HF Hub + +There are 2 ways to run a model with mistral.rs: +- From Hugging Face Hub (easiest) +- From local files + +### Getting models from Hugging Face Hub Mistral.rs can automatically download models from HF Hub. To access gated models, you should provide a token source. They may be one of: - `literal:`: Load from a specified literal @@ -299,7 +304,7 @@ Additionally, for models without quantization, the model architecture should be You can launch interactive mode, a simple chat application running in the terminal, by passing `-i`: ```bash -./mistralrs_server -i gguf -t mistralai/Mistral-7B-Instruct-v0.1 -m TheBloke/Mistral-7B-Instruct-v0.1-GGUF -f mistral-7b-instruct-v0.1.Q4_K_M.gguf +./mistralrs_server -i plain -m microsoft/Phi-3-mini-128k-instruct -a phi3 ``` ### Quick examples: @@ -342,7 +347,7 @@ To start a server running Llama from GGML: To start a server running Mistral from safetensors. ```bash -./mistralrs_server --port 1234 gguf -m mistralai/Mistral-7B-Instruct-v0.1 +./mistralrs_server --port 1234 plain -m mistralai/Mistral-7B-Instruct-v0.1 -a mistral ``` ### Structured selection with a `.toml` file
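
---

For reference, the round-trip check that the new `test_encode_llama` and `test_decode_llama` tests exercise can be expressed as one helper. This is a minimal sketch only: `assert_tokenizer_parity` is a hypothetical name and is not part of the patch; it assumes an HF tokenizer and a GGUF-converted tokenizer are already constructed, and it uses only the `tokenizers` crate calls already shown in the tests above.

```rust
// Hypothetical helper (not part of the patch): encode the same text with the
// Hugging Face tokenizer and the GGUF-converted tokenizer, then check that
// both decode back to the same string, with and without special tokens --
// the same round trip the tests above perform.
use anyhow::Result;
use tokenizers::Tokenizer;

fn assert_tokenizer_parity(hf: &Tokenizer, gguf: &Tokenizer, text: &str) -> Result<()> {
    for special in [false, true] {
        // `special` is passed as `add_special_tokens` to encode and as
        // `skip_special_tokens` to decode, mirroring how the tests pair them.
        let hf_enc = hf.encode(text, special).map_err(anyhow::Error::msg)?;
        let gguf_enc = gguf.encode(text, special).map_err(anyhow::Error::msg)?;

        let hf_dec = hf
            .decode(hf_enc.get_ids(), special)
            .map_err(anyhow::Error::msg)?;
        let gguf_dec = gguf
            .decode(gguf_enc.get_ids(), special)
            .map_err(anyhow::Error::msg)?;

        anyhow::ensure!(
            hf_dec == gguf_dec,
            "tokenizer outputs diverge (special tokens: {special})"
        );
    }
    Ok(())
}
```

Under these assumptions, such a helper could also absorb the duplicated encode/decode blocks if support for further GGUF tokenizer types (beyond `llama`) is added later.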