-
Notifications
You must be signed in to change notification settings - Fork 307
/
anymoe_lora.py
45 lines (43 loc) · 1.13 KB
/
anymoe_lora.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
from mistralrs import (
Runner,
Which,
ChatCompletionRequest,
Architecture,
AnyMoeConfig,
AnyMoeExpertType,
)
# Build a mistral.rs Runner that wraps a plain Mistral-7B-Instruct base model
# and converts it into an AnyMoE mixture-of-experts model whose experts are
# LoRA adapters, training the gating layer on the fly from a local dataset.
# NOTE(review): constructing this Runner downloads/loads model weights and
# runs gating-layer training — it is expensive, not a cheap object creation.
runner = Runner(
    which=Which.Plain(
        model_id="mistralai/Mistral-7B-Instruct-v0.1",  # base model identifier
        arch=Architecture.Mistral,
    ),
    anymoe_config=AnyMoeConfig(
        hidden_size=4096,  # presumably the base model's hidden dim — confirm against model config
        dataset_json="examples/amoe.json",  # local JSON dataset used to train the gating layer
        prefix="model.layers",  # module-path prefix where MoE layers are injected
        mlp="mlp",  # name of the MLP submodule targeted within each layer
        expert_type=AnyMoeExpertType.LoraAdapter(
            # LoRA hyperparameters for the adapter experts; only `gate_proj`
            # projections are adapted here.
            rank=64, alpha=16.0, target_modules=["gate_proj"]
        ),
        lr=1e-3,  # gating-layer training learning rate
        epochs=100,
        batch_size=4,
        model_ids=["typeof/zephyr-7b-beta-lora"],  # LoRA adapter(s) serving as the experts
        # For inference (use a pretrained gating layer) see `anymoe_inference.py`
        loss_csv_path="loss.csv",  # training loss curve is written to this CSV
    ),
)
# Send one chat-completion request through the AnyMoE runner, then print the
# generated message text and the token-usage statistics.
res = runner.send_chat_completion_request(
    ChatCompletionRequest(
        model="mistral",
        messages=[
            {"role": "user", "content": "Tell me a story about the Rust type system."}
        ],
        max_tokens=256,  # hard cap on generated tokens
        presence_penalty=1.0,
        # Low top_p and temperature make sampling close to greedy/deterministic.
        top_p=0.1,
        temperature=0.1,
    )
)
# Response follows the OpenAI-style shape: choices[].message.content + usage.
print(res.choices[0].message.content)
print(res.usage)