From d50ae1281d89be5d150b242d1f401c486ee1bfa9 Mon Sep 17 00:00:00 2001 From: Nestor Qin Date: Tue, 23 Jul 2024 20:27:23 -0400 Subject: [PATCH] model: add Llama 3.1 models --- app/client/api.ts | 1 - app/constant.ts | 233 ++++++++++++++++++++++++-------------------- app/store/config.ts | 4 +- package.json | 2 +- yarn.lock | 8 +- 5 files changed, 133 insertions(+), 115 deletions(-) diff --git a/app/client/api.ts b/app/client/api.ts index 2e945eea..4492df91 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -53,7 +53,6 @@ export interface ModelRecord { provider?: string; size?: string; quantization?: string; - context_length?: string; family?: string; vram_required_MB?: number; buffer_size_required_bytes?: number; diff --git a/app/constant.ts b/app/constant.ts index cb85d8fc..243f3b6f 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -65,14 +65,14 @@ Latex block format: $$e=mc^2$$ `; export const DEFAULT_MODELS: ModelRecord[] = [ + // Llama-3.1 8B { - name: "Llama-3-8B-Instruct-q4f32_1-MLC-1k", + name: "Llama-3.1-8B-Instruct-q4f32_1-MLC-1k", display_name: "Llama", provider: "Meta", size: "8B", - quantization: "q4f32_1", - context_length: "1k", - family: "Llama 3", + quantization: "q4f32", + family: "Llama 3.1", vram_required_MB: 5295.7, low_resource_required: true, recommended_config: { @@ -83,13 +83,12 @@ export const DEFAULT_MODELS: ModelRecord[] = [ }, }, { - name: "Llama-3-8B-Instruct-q4f16_1-MLC-1k", + name: "Llama-3.1-8B-Instruct-q4f16_1-MLC-1k", display_name: "Llama", provider: "Meta", size: "8B", quantization: "q4f16_1", - context_length: "1k", - family: "Llama 3", + family: "Llama 3.1", vram_required_MB: 4598.34, low_resource_required: true, recommended_config: { @@ -100,13 +99,12 @@ export const DEFAULT_MODELS: ModelRecord[] = [ }, }, { - name: "Llama-3-8B-Instruct-q4f32_1-MLC", + name: "Llama-3.1-8B-Instruct-q4f32_1-MLC", display_name: "Llama", provider: "Meta", size: "8B", - quantization: "q4f32_1", - context_length: "4k", - family: "Llama 3", + 
quantization: "q4f32", + family: "Llama 3.1", vram_required_MB: 6101.01, low_resource_required: false, recommended_config: { @@ -117,13 +115,12 @@ export const DEFAULT_MODELS: ModelRecord[] = [ }, }, { - name: "Llama-3-8B-Instruct-q4f16_1-MLC", + name: "Llama-3.1-8B-Instruct-q4f16_1-MLC", display_name: "Llama", provider: "Meta", size: "8B", quantization: "q4f16_1", - context_length: "4k", - family: "Llama 3", + family: "Llama 3.1", vram_required_MB: 5001, low_resource_required: false, recommended_config: { @@ -139,7 +136,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "NousResearch", size: "8B", quantization: "q4f16_1", - context_length: "4k", family: "Hermes 2 Pro", vram_required_MB: 4976.13, low_resource_required: false, @@ -155,8 +151,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Hermes", provider: "NousResearch", size: "8B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", family: "Hermes 2 Pro", vram_required_MB: 6051.27, low_resource_required: false, @@ -173,7 +168,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "NousResearch", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Hermes 2 Pro", vram_required_MB: 4033.28, low_resource_required: false, @@ -190,7 +184,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "MLC", quantization: "q4f16_1", - context_length: "4k", + family: "Phi 3 Mini", vram_required_MB: 3672.07, low_resource_required: false, @@ -205,8 +199,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: "Phi-3-mini-4k-instruct-q4f32_1-MLC", display_name: "Phi", provider: "MLC", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", family: "Phi 3 Mini", vram_required_MB: 5483.12, low_resource_required: false, @@ -222,7 +215,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "MLC", quantization: "q4f16_1", - context_length: "1k", family: "Phi 3 Mini", 
vram_required_MB: 2520.07, low_resource_required: true, @@ -237,8 +229,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: "Phi-3-mini-4k-instruct-q4f32_1-MLC-1k", display_name: "Phi", provider: "MLC", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "Phi 3 Mini", vram_required_MB: 3179.12, low_resource_required: true, @@ -255,7 +246,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Mistral AI", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Mistral", vram_required_MB: 4573.39, low_resource_required: false, @@ -272,8 +262,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Mistral", provider: "Mistral AI", size: "7B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", family: "Mistral", vram_required_MB: 5619.27, low_resource_required: false, @@ -290,7 +279,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Mistral AI", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Mistral", vram_required_MB: 4573.39, low_resource_required: false, @@ -306,7 +294,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "NousResearch", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Hermes", vram_required_MB: 4573.39, low_resource_required: false, @@ -322,7 +309,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Maxime Labonne", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Hermes", vram_required_MB: 4573.39, low_resource_required: false, @@ -338,7 +324,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "WizardLM", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "WizardMath", vram_required_MB: 4573.39, low_resource_required: false, @@ -354,7 +339,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "1.7B", quantization: "q0f16", - context_length: "2048", family: "SmolLM", vram_required_MB: 3736.19, 
low_resource_required: true, @@ -370,7 +354,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "1.7B", quantization: "q0f32", - context_length: "2048", family: "SmolLM", vram_required_MB: 7432.38, low_resource_required: false, @@ -385,7 +368,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "1.7B", quantization: "q4f16_1", - context_length: "2048", family: "SmolLM", vram_required_MB: 1390.19, low_resource_required: true, @@ -400,8 +382,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "SmolLM", provider: "SmolLM", size: "1.7B", - quantization: "q4f32_1", - context_length: "2048", + quantization: "q4f32", family: "SmolLM", vram_required_MB: 1924.38, low_resource_required: true, @@ -416,7 +397,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "360M", quantization: "q0f16", - context_length: "2048", family: "SmolLM", vram_required_MB: 791.99, low_resource_required: true, @@ -432,7 +412,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "360M", quantization: "q0f32", - context_length: "2048", family: "SmolLM", vram_required_MB: 1583.99, low_resource_required: true, @@ -447,7 +426,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "360M", quantization: "q4f16_1", - context_length: "2048", family: "SmolLM", vram_required_MB: 296.06, low_resource_required: true, @@ -462,8 +440,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "SmolLM", provider: "SmolLM", size: "360M", - quantization: "q4f32_1", - context_length: "2048", + quantization: "q4f32", family: "SmolLM", vram_required_MB: 419.61, low_resource_required: true, @@ -478,7 +455,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "135M", quantization: "q0f16", - context_length: "2048", family: "SmolLM", vram_required_MB: 314.69, low_resource_required: true, @@ -494,7 +470,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ 
provider: "SmolLM", size: "135M", quantization: "q0f32", - context_length: "2048", family: "SmolLM", vram_required_MB: 629.38, low_resource_required: true, @@ -509,7 +484,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "SmolLM", size: "135M", quantization: "q4f16_1", - context_length: "2048", family: "SmolLM", vram_required_MB: 130.33, low_resource_required: true, @@ -524,8 +498,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "SmolLM", provider: "SmolLM", size: "135M", - quantization: "q4f32_1", - context_length: "2048", + quantization: "q4f32", family: "SmolLM", vram_required_MB: 196.54, low_resource_required: true, @@ -540,7 +513,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Alibaba", size: "0.5B", quantization: "q4f16_1", - context_length: "4k", family: "Qwen 2", vram_required_MB: 500, //rough estimate low_resource_required: true, @@ -557,7 +529,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Alibaba", size: "0.5B", quantization: "q0f16", - context_length: "4k", + family: "Qwen 2", vram_required_MB: 1624.12, low_resource_required: true, @@ -574,7 +546,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Alibaba", size: "0.5B", quantization: "q0f32", - context_length: "4k", + family: "Qwen 2", vram_required_MB: 2654.75, low_resource_required: true, @@ -591,7 +563,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Alibaba", size: "1.5B", quantization: "q4f16_1", - context_length: "4k", + family: "Qwen 2", vram_required_MB: 1629.75, low_resource_required: true, @@ -607,8 +579,8 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Qwen", provider: "Alibaba", size: "1.5B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", + family: "Qwen 2", vram_required_MB: 1888.97, low_resource_required: true, @@ -625,7 +597,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Alibaba", size: "7B", quantization: "q4f16_1", - context_length: "4k", + 
family: "Qwen 2", vram_required_MB: 5106.67, low_resource_required: false, @@ -641,8 +613,8 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Qwen", provider: "Alibaba", size: "7B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", + family: "Qwen 2", vram_required_MB: 5900.09, low_resource_required: false, @@ -659,7 +631,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Google", size: "2B", quantization: "q4f16_1", - context_length: "4k", family: "Gemma", vram_required_MB: 1476.52, low_resource_required: false, @@ -677,8 +648,8 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Gemma", provider: "Google", size: "2B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", + family: "Gemma", vram_required_MB: 1750.66, low_resource_required: false, @@ -696,7 +667,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Google", size: "2B", quantization: "q4f16_1", - context_length: "1k", family: "Gemma", vram_required_MB: 1476.52, low_resource_required: true, @@ -714,8 +684,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Gemma", provider: "Google", size: "2B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "Gemma", vram_required_MB: 1750.66, low_resource_required: true, @@ -733,7 +702,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Hugging Face", size: "1.6B", quantization: "q4f16_1", - context_length: "4k", family: "StableLM 2", vram_required_MB: 2087.66, low_resource_required: false, @@ -749,8 +717,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "StableLM", provider: "Hugging Face", size: "1.6B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", family: "StableLM 2", vram_required_MB: 2999.33, low_resource_required: false, @@ -767,7 +734,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Hugging Face", size: "1.6B", quantization: "q4f16_1", - 
context_length: "1k", family: "StableLM 2", vram_required_MB: 1511.66, low_resource_required: true, @@ -783,8 +749,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "StableLM", provider: "Hugging Face", size: "1.6B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "StableLM 2", vram_required_MB: 1847.33, low_resource_required: true, @@ -801,7 +766,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Together", size: "3B", quantization: "q4f16_1", - context_length: "2k", family: "RedPajama", vram_required_MB: 2972.09, low_resource_required: false, @@ -816,8 +780,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "RedPajama", provider: "Together", size: "3B", - quantization: "q4f32_1", - context_length: "2k", + quantization: "q4f32", family: "RedPajama", vram_required_MB: 3928.09, low_resource_required: false, @@ -832,7 +795,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Together", size: "3B", quantization: "q4f16_1", - context_length: "1k", family: "RedPajama", vram_required_MB: 2041.09, low_resource_required: true, @@ -847,8 +809,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "RedPajama", provider: "Together", size: "3B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "RedPajama", vram_required_MB: 2558.09, low_resource_required: true, @@ -863,7 +824,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Zhang Peiyuan", size: "1.1B", quantization: "q4f16_1", - context_length: "2k", family: "TinyLlama", vram_required_MB: 697.24, low_resource_required: true, @@ -880,8 +840,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "TinyLlama", provider: "Zhang Peiyuan", size: "1.1B", - quantization: "q4f32_1", - context_length: "2k", + quantization: "q4f32", family: "TinyLlama", vram_required_MB: 839.98, low_resource_required: true, @@ -898,7 +857,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ 
provider: "Zhang Peiyuan", size: "1.1B", quantization: "q4f16_1", - context_length: "1k", family: "TinyLlama", vram_required_MB: 675.24, low_resource_required: true, @@ -915,8 +873,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "TinyLlama", provider: "Zhang Peiyuan", size: "1.1B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "TinyLlama", vram_required_MB: 795.98, low_resource_required: true, @@ -928,12 +885,91 @@ export const DEFAULT_MODELS: ModelRecord[] = [ }, }, { - name: "Llama-3-70B-Instruct-q3f16_1-MLC", + name: "Llama-3.1-70B-Instruct-q3f16_1-MLC", + display_name: "Llama", + provider: "Meta", + size: "70B", + quantization: "q3f16", + family: "Llama 3.1", + vram_required_MB: 31153.13, + low_resource_required: false, + recommended_config: { + temperature: 0.6, + presence_penalty: 0, + frequency_penalty: 0, + top_p: 0.9, + }, + }, + { + name: "Llama-3-8B-Instruct-q4f32_1-MLC-1k", + display_name: "Llama", + provider: "Meta", + size: "8B", + quantization: "q4f32", + family: "Llama 3", + vram_required_MB: 5295.7, + low_resource_required: true, + recommended_config: { + temperature: 0.6, + presence_penalty: 0, + frequency_penalty: 0, + top_p: 0.9, + }, + }, + { + name: "Llama-3-8B-Instruct-q4f16_1-MLC-1k", + display_name: "Llama", + provider: "Meta", + size: "8B", + quantization: "q4f16", + family: "Llama 3", + vram_required_MB: 4598.34, + low_resource_required: true, + recommended_config: { + temperature: 0.6, + presence_penalty: 0, + frequency_penalty: 0, + top_p: 0.9, + }, + }, + { + name: "Llama-3-8B-Instruct-q4f32_1-MLC", + display_name: "Llama", + provider: "Meta", + size: "8B", + quantization: "q4f32", + family: "Llama 3", + vram_required_MB: 6101.01, + low_resource_required: false, + recommended_config: { + temperature: 0.6, + presence_penalty: 0, + frequency_penalty: 0, + top_p: 0.9, + }, + }, + { + name: "Llama-3-8B-Instruct-q4f16_1-MLC", + display_name: "Llama", + provider: "Meta", + size: "8B", + 
quantization: "q4f16", + family: "Llama 3", + vram_required_MB: 5001.0, + low_resource_required: false, + recommended_config: { + temperature: 0.6, + presence_penalty: 0, + frequency_penalty: 0, + top_p: 0.9, + }, + }, + { + name: "Llama-3-70B-Instruct-q3f16_1-MLC", display_name: "Llama", provider: "Meta", size: "70B", - quantization: "q3f16_1", - context_length: "4k", + quantization: "q3f16", family: "Llama 3", vram_required_MB: 31153.13, low_resource_required: false, @@ -949,8 +985,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Llama", provider: "Meta", size: "7B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "Llama 2", vram_required_MB: 5284.01, low_resource_required: false, @@ -965,7 +1000,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Meta", size: "7B", quantization: "q4f16_1", - context_length: "1k", family: "Llama 2", vram_required_MB: 4618.52, low_resource_required: false, @@ -980,8 +1014,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Llama", provider: "Meta", size: "7B", - quantization: "q4f32_1", - context_length: "4k", + quantization: "q4f32", family: "Llama 2", vram_required_MB: 9109.03, low_resource_required: false, @@ -996,7 +1029,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Meta", size: "7B", quantization: "q4f16_1", - context_length: "4k", family: "Llama 2", vram_required_MB: 6749.02, low_resource_required: false, @@ -1012,7 +1044,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Meta", size: "13B", quantization: "q4f16_1", - context_length: "4k", family: "Llama 2", vram_required_MB: 11814.09, low_resource_required: false, @@ -1027,7 +1058,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "Microsoft", quantization: "q4f16_1", - context_length: "2k", family: "Phi 2", vram_required_MB: 3053.97, low_resource_required: false, @@ -1041,8 +1071,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: 
"phi-2-q4f32_1-MLC", display_name: "Phi", provider: "Microsoft", - quantization: "q4f32_1", - context_length: "2k", + quantization: "q4f32", family: "Phi 2", vram_required_MB: 4032.48, low_resource_required: false, @@ -1056,7 +1085,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "Microsoft", quantization: "q4f16_1", - context_length: "1k", family: "Phi 2", vram_required_MB: 2131.97, low_resource_required: true, @@ -1070,8 +1098,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: "phi-2-q4f32_1-MLC-1k", display_name: "Phi", provider: "Microsoft", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "Phi 2", vram_required_MB: 2740.48, low_resource_required: true, @@ -1085,7 +1112,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "Microsoft", quantization: "q4f16_1", - context_length: "2k", family: "Phi 1.5", vram_required_MB: 1210.09, low_resource_required: true, @@ -1099,8 +1125,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: "phi-1_5-q4f32_1-MLC", display_name: "Phi", provider: "Microsoft", - quantization: "q4f32_1", - context_length: "2k", + quantization: "q4f32", family: "Phi 1.5", vram_required_MB: 1682.09, low_resource_required: true, @@ -1114,7 +1139,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "Phi", provider: "Microsoft", quantization: "q4f16_1", - context_length: "1k", family: "Phi 1.5", vram_required_MB: 1210.09, low_resource_required: true, @@ -1128,8 +1152,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ name: "phi-1_5-q4f32_1-MLC-1k", display_name: "Phi", provider: "Microsoft", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "Phi 1.5", vram_required_MB: 1682.09, low_resource_required: true, @@ -1144,7 +1167,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Zhang Peiyuan", size: "1.1B", quantization: "q4f16_1", - context_length: "2k", family: "TinyLlama", 
vram_required_MB: 697.24, low_resource_required: true, @@ -1159,8 +1181,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "TinyLlama", provider: "Zhang Peiyuan", size: "1.1B", - quantization: "q4f32_1", - context_length: "2k", + quantization: "q4f32", family: "TinyLlama", vram_required_MB: 839.98, low_resource_required: true, @@ -1175,7 +1196,6 @@ export const DEFAULT_MODELS: ModelRecord[] = [ provider: "Zhang Peiyuan", size: "1.1B", quantization: "q4f16_1", - context_length: "1k", family: "TinyLlama", vram_required_MB: 675.24, low_resource_required: true, @@ -1190,8 +1210,7 @@ export const DEFAULT_MODELS: ModelRecord[] = [ display_name: "TinyLlama", provider: "Zhang Peiyuan", size: "1.1B", - quantization: "q4f32_1", - context_length: "1k", + quantization: "q4f32", family: "TinyLlama", vram_required_MB: 795.98, low_resource_required: true, diff --git a/app/store/config.ts b/app/store/config.ts index ae1dd74d..784a0354 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -208,9 +208,9 @@ export const useAppConfig = createPersistStore( }), { name: StoreKey.Config, - version: 0.45, + version: 0.46, migrate: (persistedState, version) => { - if (version < 0.45) { + if (version < 0.46) { return { ...DEFAULT_CONFIG, ...(persistedState as any), diff --git a/package.json b/package.json index d0559bbd..7d8b252d 100644 --- a/package.json +++ b/package.json @@ -17,7 +17,7 @@ "dependencies": { "@fortaine/fetch-event-source": "^3.0.6", "@hello-pangea/dnd": "^16.5.0", - "@mlc-ai/web-llm": "^0.2.50", + "@mlc-ai/web-llm": "^0.2.51", "@serwist/next": "^9.0.2", "@svgr/webpack": "^6.5.1", "emoji-picker-react": "^4.9.2", diff --git a/yarn.lock b/yarn.lock index 1ecf776b..70e2aa1e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1180,10 +1180,10 @@ "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" -"@mlc-ai/web-llm@^0.2.50": - version "0.2.50" - resolved 
"https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.50.tgz#d4e434dd9bbce4e0b84b5006a962ad7bfba07a8d" - integrity sha512-XWNcC6DtrYndA4OE/pVbxKY57WCdsvId9qE8K5iDfvVn5ZISRCxrcvRZOPVeMMmigr0hRwFWyTdHdGMSZiBz0w== +"@mlc-ai/web-llm@^0.2.51": + version "0.2.51" + resolved "https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.51.tgz#73dced138262bf79a1bc082dd36dc6d1a928b54f" + integrity sha512-62aiDyiSzUah1vqnTcZvH3lFRjz4VM2CeIBp7QpcFgX2HPA3vVYhDbV0cOVGAfK1MfH1CipZiyjOjkQUGjjRTw== dependencies: loglevel "^1.9.1"