Commit 51ed6d2

Add MixtureOfAgentsLLM docstring example

gabrielmbmb committed Jun 18, 2024
1 parent 44adb25 commit 51ed6d2
Showing 11 changed files with 51 additions and 58 deletions.
4 changes: 0 additions & 4 deletions src/distilabel/llms/anthropic.py
@@ -111,11 +111,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/anyscale.py
@@ -50,11 +50,7 @@ class AnyscaleLLM(OpenAILLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/azure.py
@@ -103,11 +103,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/cohere.py
@@ -102,11 +102,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/groq.py
@@ -95,11 +95,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

12 changes: 0 additions & 12 deletions src/distilabel/llms/huggingface/inference_endpoints.py
@@ -81,11 +81,7 @@ class InferenceEndpointsLLM(AsyncLLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```

Dedicated Inference Endpoints:

@@ -101,11 +97,7 @@ class InferenceEndpointsLLM(AsyncLLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```

Dedicated Inference Endpoints or TGI:

@@ -120,11 +112,7 @@ class InferenceEndpointsLLM(AsyncLLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/litellm.py
@@ -73,11 +73,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/mistral.py
@@ -94,11 +94,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

53 changes: 51 additions & 2 deletions src/distilabel/llms/moa.py
@@ -26,14 +26,18 @@
from distilabel.mixins.runtime_parameters import RuntimeParametersNames
from distilabel.steps.tasks.typing import FormattedInput

+# Mixture-of-Agents system prompt from the paper, with an added instruction telling the
+# LLM not to mention that it used responses from previous models, to avoid completions
+# like "Based on the previous responses...".
MOA_SYSTEM_PROMPT = (
    "You have been provided with a set of responses from various open-source models to the"
    " latest user query. Your task is to synthesize these responses into a single, high-quality"
    " response. It is crucial to critically evaluate the information provided in these responses,"
    " recognizing that some of it may be biased or incorrect. Your response should not simply"
    " replicate the given answers but should offer a refined, accurate, and comprehensive"
    " reply to the instruction. Ensure your response is well-structured, coherent, and adheres"
-    " to the highest standards of accuracy and reliability."
+    " to the highest standards of accuracy and reliability. Do not mention that you have used"
+    " the responses from previous models."
    "\nResponses from models:"
)
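At generation time, the proposer responses collected in the previous round are appended after this prompt's trailing "Responses from models:" line. A minimal sketch of that assembly — a hypothetical helper for illustration only; the real wiring lives elsewhere in moa.py and is not part of this diff:

```python
# Hypothetical helper: number each previous-round output and append the list
# to the MOA_SYSTEM_PROMPT constant defined above.
def build_moa_system_prompt(prev_outputs: list[str]) -> str:
    responses = "\n".join(
        f"{i}. {output}" for i, output in enumerate(prev_outputs, start=1)
    )
    return f"{MOA_SYSTEM_PROMPT}\n{responses}"


print(build_moa_system_prompt(["First proposer answer.", "Second proposer answer."]))
```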

@@ -55,6 +59,49 @@ class MixtureOfAgentsLLM(AsyncLLM):
References:
    - [Mixture-of-Agents Enhances Large Language Model Capabilities](https://arxiv.org/abs/2406.04692)

+Examples:
+
+    Generate text using open-source models as proposers and aggregator:
+
+    ```python
+    from distilabel.llms import MixtureOfAgentsLLM, InferenceEndpointsLLM
+
+    llm = MixtureOfAgentsLLM(
+        aggregator_llm=InferenceEndpointsLLM(
+            model_id="meta-llama/Meta-Llama-3-70B-Instruct",
+            tokenizer_id="meta-llama/Meta-Llama-3-70B-Instruct",
+        ),
+        proposers_llms=[
+            InferenceEndpointsLLM(
+                model_id="meta-llama/Meta-Llama-3-70B-Instruct",
+                tokenizer_id="meta-llama/Meta-Llama-3-70B-Instruct",
+            ),
+            InferenceEndpointsLLM(
+                model_id="NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+                tokenizer_id="NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+            ),
+            InferenceEndpointsLLM(
+                model_id="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+                tokenizer_id="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+            ),
+        ],
+        rounds=2,
+    )
+
+    llm.load()
+
+    output = llm.generate(
+        inputs=[
+            [
+                {
+                    "role": "user",
+                    "content": "My favorite witty review of The Rings of Power series is this: Input:",
+                }
+            ]
+        ]
+    )
+    ```
"""

aggregator_llm: LLM
@@ -168,7 +215,9 @@ async def _agenerate(
        A list containing the generations for each input.
    """
    aggregator_llm_kwargs: Dict[str, Any] = kwargs.get("aggregator_llm", {})
-    proposers_llms_kwargs: List[Dict[str, Any]] = kwargs.get("proposers_llms", [])
+    proposers_llms_kwargs: List[Dict[str, Any]] = kwargs.get(
+        "proposers_llms", [{}] * len(self.proposers_llms)
+    )

    prev_outputs = []
    for round in range(self.rounds):
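The new default matters if these kwargs are paired element-wise with `self.proposers_llms` (e.g. via `zip`): an empty-list default pairs with nothing and would silently skip every proposer, while one empty dict per proposer keeps them all in play. A standalone sketch of the difference (names are illustrative, not the library's):

```python
proposers = ["llm_a", "llm_b", "llm_c"]

# Old default: zip against an empty list yields nothing, so no proposer runs.
for llm, llm_kwargs in zip(proposers, []):
    print("never reached", llm, llm_kwargs)

# New default: one empty kwargs dict per proposer, so every proposer is called.
for llm, llm_kwargs in zip(proposers, [{}] * len(proposers)):
    print(llm, llm_kwargs)  # llm_a {}, llm_b {}, llm_c {}
```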
12 changes: 0 additions & 12 deletions src/distilabel/llms/openai.py
@@ -72,11 +72,7 @@ class OpenAILLM(AsyncLLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```

Generate text from a custom endpoint following the OpenAI API:

@@ -91,11 +87,7 @@
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```

Generate structured data:

@@ -117,11 +109,7 @@ class User(BaseModel):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Create a user profile for the following marathon"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Create a user profile for the following marathon"}])
```
"""

4 changes: 0 additions & 4 deletions src/distilabel/llms/together.py
@@ -49,11 +49,7 @@ class TogetherLLM(OpenAILLM):
llm.load()

# Synchronous request
output = llm.generate(inputs=[[{"role": "user", "content": "Hello world!"}]])
-# Asynchronous request
-output = await llm.agenerate(input=[{"role": "user", "content": "Hello world!"}])
```
"""

