Commit 582485b

Apply suggestions from code review

Co-authored-by: Alvaro Bartolome <[email protected]>
Authored by gabrielmbmb and alvarobartt on Dec 19, 2023
1 parent: e3db5e8
Showing 1 changed file with 2 additions and 2 deletions.
examples/pipeline-llamacpp-and-openai-process.py (2 additions, 2 deletions)

@@ -34,7 +34,7 @@ def load_llama_cpp_llm(task: "Task") -> "LLM":
     from distilabel.llm import LlamaCppLLM
 
     llama = Llama(
-        model_path="notus-7b-v1.Q4_0.gguf", n_gpu_layers=10, n_ctx=1024, verbose=True
+        model_path="<PATH_TO_GGUF_MODEL>", n_gpu_layers=10, n_ctx=1024, verbose=False
     )
     return LlamaCppLLM(
         model=llama, task=task, max_new_tokens=512, prompt_format="zephyr"
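For context, a minimal sketch of how the generator-loading function reads after this hunk. Only the `Llama(...)` and `LlamaCppLLM(...)` calls appear in the diff; the `llama_cpp` import, the docstring, and the `TYPE_CHECKING` imports of `Task` and `LLM` are assumptions added to make the sketch self-contained.

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Assumed type-only imports: the diff only shows the string annotations "Task" and "LLM".
    from distilabel.llm import LLM
    from distilabel.tasks import Task


def load_llama_cpp_llm(task: "Task") -> "LLM":
    """Load a llama.cpp model and wrap it as a distilabel LLM (as it reads after this commit)."""
    from distilabel.llm import LlamaCppLLM
    from llama_cpp import Llama  # assumed import; provided by the llama-cpp-python package

    llama = Llama(
        model_path="<PATH_TO_GGUF_MODEL>",  # placeholder from the diff: point at a local GGUF file
        n_gpu_layers=10,  # layers offloaded to the GPU
        n_ctx=1024,  # context window size
        verbose=False,  # silence llama.cpp's own logging (changed from True)
    )
    return LlamaCppLLM(
        model=llama,
        task=task,
        max_new_tokens=512,
        prompt_format="zephyr",
    )
```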
@@ -70,7 +70,7 @@ def load_openai_llm(task: "Task") -> "LLM":
 dataset = pipeline.generate(
     dataset,  # type: ignore
     num_generations=2,
-    batch_size=2,
+    batch_size=1,
     enable_checkpoints=True,
     display_progress_bar=False,
 )
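And a rough sketch of how the two loader functions (`load_llama_cpp_llm` above and `load_openai_llm`, referenced in the hunk header) are plausibly wired into the pipeline whose `generate()` call this commit touches. The `ProcessLLM`/`Pipeline` construction, the task choices, the `load_dataset` call, and the dataset id are assumptions based on the distilabel 0.x API and are not part of this diff; only `pipeline.generate(...)` appears here, with `batch_size` lowered from 2 to 1.

```python
from datasets import load_dataset

from distilabel.llm import ProcessLLM
from distilabel.pipeline import Pipeline
from distilabel.tasks import TextGenerationTask, UltraFeedbackTask

if __name__ == "__main__":
    # Hypothetical input dataset id; the real example's dataset is not shown in this diff.
    dataset = load_dataset("<HF_DATASET_ID>", split="train")

    pipeline = Pipeline(
        # Each ProcessLLM runs its LLM in a separate process and calls the loader
        # function (the ones from the example file) inside that child process.
        generator=ProcessLLM(
            task=TextGenerationTask(),
            load_llm_fn=load_llama_cpp_llm,
        ),
        labeller=ProcessLLM(
            task=UltraFeedbackTask.for_instruction_following(),
            load_llm_fn=load_openai_llm,
        ),
    )

    dataset = pipeline.generate(
        dataset,  # type: ignore
        num_generations=2,  # generations per input row
        batch_size=1,  # the value this commit sets (was 2)
        enable_checkpoints=True,  # keep partial results if generation is interrupted
        display_progress_bar=False,
    )
```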
