Global instructions #234

Open
wants to merge 13 commits into base: master
22 changes: 17 additions & 5 deletions holmes/core/conversations.py
@@ -9,6 +9,8 @@
)
from holmes.plugins.prompts import load_and_render_prompt
from holmes.core.tool_calling_llm import ToolCallingLLM
from holmes.core.tool_calling_llm import Instructions
from holmes.utils.global_instructions import add_global_instructions_to_user_prompt

DEFAULT_TOOL_SIZE = 10000

@@ -134,7 +136,8 @@ def handle_issue_conversation(
return system_prompt


def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCallingLLM):
def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCallingLLM,
global_instructions: Optional[Instructions] = None):
"""
This function generates a list of messages for issue conversation and ensures that the message sequence adheres to the model's context window limitations
by truncating tool outputs as necessary before sending to llm.
@@ -177,8 +180,11 @@ def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCall
tools_for_investigation = issue_chat_request.investigation_result.tools

if not conversation_history or len(conversation_history) == 0:
user_prompt = add_global_instructions_to_user_prompt(user_prompt, global_instructions)

number_of_tools_for_investigation = len(tools_for_investigation)
if number_of_tools_for_investigation == 0:

system_prompt = load_and_render_prompt(
template_path,
{
@@ -249,6 +255,8 @@ def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCall
},
]

user_prompt = add_global_instructions_to_user_prompt(user_prompt, global_instructions)

conversation_history.append(
{
"role": "user",
@@ -302,12 +310,15 @@ def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCall


def build_chat_messages(
ask: str, conversation_history: Optional[List[Dict[str, str]]], ai: ToolCallingLLM
ask: str, conversation_history: Optional[List[Dict[str, str]]], ai: ToolCallingLLM,
global_instructions: Optional[Instructions] = None
) -> List[dict]:
template_path = "builtin://generic_ask.jinja2"
template_path = "builtin://generic_ask_conversation.jinja2"

if not conversation_history or len(conversation_history) == 0:
system_prompt = load_and_render_prompt(template_path, {})
ask = add_global_instructions_to_user_prompt(ask, global_instructions)

messages = [
{
"role": "system",
@@ -319,7 +330,9 @@ def build_issue_chat_messages(issue_chat_request: IssueChatRequest, ai: ToolCall
},
]
return messages


ask = add_global_instructions_to_user_prompt(ask, global_instructions)

conversation_history.append(
{
"role": "user",
Expand All @@ -340,5 +353,4 @@ def build_chat_messages(
ai, conversation_history_without_tools, number_of_tools
)
truncate_tool_messages(conversation_history, tool_size)

return conversation_history
Contributor:
The conversation history now includes the global instructions. Does this mean this is what the user will see in the chat history?
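A minimal sketch of how the updated `build_chat_messages` would be invoked, assuming the signature introduced in this diff; the `ai` object and the sample instruction text are placeholders. The point is that the expanded ask is the text that ends up in `conversation_history`:

```python
# Sketch only: assumes the signatures added in this PR.
from holmes.core.conversations import build_chat_messages
from holmes.core.tool_calling_llm import Instructions

# Hypothetical account-level instruction fetched from the DAL.
global_instructions = Instructions(
    instructions=["Whenever investigating memory issues, always check resource limits."]
)

messages = build_chat_messages(
    ask="Why did the webserver-example pod crash?",
    conversation_history=None,
    ai=ai,  # a ToolCallingLLM instance created elsewhere (e.g. config.create_toolcalling_llm)
    global_instructions=global_instructions,
)

# With no prior history, messages[1]["content"] is the ask plus the suffix
# "Global Instructions (use only if relevant): ...", and that expanded text
# is what subsequent turns will see in conversation_history.
```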

3 changes: 3 additions & 0 deletions holmes/core/investigation.py
@@ -17,6 +17,8 @@ def investigate_issues(investigate_request: InvestigateRequest, dal: SupabaseDal
resource_instructions = dal.get_resource_instructions(
"alert", investigate_request.context.get("issue_type")
)
global_instructions = dal.get_global_instructions_for_account()

raw_data = investigate_request.model_dump()
if context:
raw_data["extra_context"] = context
@@ -38,6 +40,7 @@ def investigate_issues(investigate_request: InvestigateRequest, dal: SupabaseDal
console=console,
post_processing_prompt=HOLMES_POST_PROCESSING_PROMPT,
instructions=resource_instructions,
global_instructions=global_instructions
)

return InvestigationResult(
21 changes: 20 additions & 1 deletion holmes/core/supabase_dal.py
@@ -7,7 +7,7 @@
from uuid import uuid4

import yaml
from holmes.core.tool_calling_llm import ResourceInstructionDocument, ResourceInstructions
from holmes.core.tool_calling_llm import ResourceInstructionDocument, ResourceInstructions, Instructions
from holmes.utils.definitions import RobustaConfig
from postgrest.types import ReturnMethod
from supabase import create_client
@@ -194,6 +194,25 @@ def get_resource_instructions(self, type: str, name: Optional[str]) -> Optional[

return None

def get_global_instructions_for_account(self) -> Optional[Instructions]:
try:
res = (
self.client
.table(RUNBOOKS_TABLE)
.select("runbook")
.eq("account_id", self.account_id)
.eq("subject_type", "Account")
.execute()
)

if res.data:
instructions = res.data[0].get("runbook").get("instructions")
return Instructions(instructions=instructions)
except Exception:
logging.exception("Failed to fetch global instructions", exc_info=True)

return None

def create_session_token(self) -> str:
token = str(uuid4())
self.client.table(SESSION_TOKENS_TABLE).insert(
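The query above appears to assume that each Account-scoped row in the runbooks table stores its instruction list under a `runbook.instructions` key; the row below is an invented illustration of that assumption:

```python
# Invented example of the row shape get_global_instructions_for_account expects.
row = {
    "runbook": {
        "instructions": [
            "When investigating OOMKilled pods, always report the memory limit."
        ]
    }
}

# Mirrors the parsing above: a missing or null "runbook" value would raise
# inside the try/except and the method would fall through to return None.
instructions_list = row.get("runbook").get("instructions")
```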
11 changes: 10 additions & 1 deletion holmes/core/tool_calling_llm.py
@@ -53,6 +53,10 @@ class ResourceInstructionDocument(BaseModel):
url: str


class Instructions(BaseModel):
instructions: List[str] = []


class ResourceInstructions(BaseModel):
instructions: List[str] = []
documents: List[ResourceInstructionDocument] = []
@@ -329,6 +333,7 @@ def investigate(
prompt: str,
console: Console,
instructions: Optional[ResourceInstructions],
global_instructions: Optional[Instructions] = None,
post_processing_prompt: Optional[str] = None,
) -> LLMResult:
runbooks = self.runbook_manager.get_instructions_for_issue(issue)
@@ -360,8 +365,12 @@
user_prompt += f"* {runbook_str}\n"

user_prompt = f'My instructions to check \n"""{user_prompt}"""'


if global_instructions and global_instructions.instructions and len(global_instructions.instructions[0]) > 0:
user_prompt += f"\n\nGlobal Instructions (use only if relevant): {global_instructions.instructions[0]}\n"

user_prompt = f"{user_prompt}\n This is context from the issue {issue.raw}"

logging.debug(
"Rendered system prompt:\n%s", textwrap.indent(system_prompt, " ")
)
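Note that the check above only reads `global_instructions.instructions[0]`, so any additional entries in the list are ignored; a tiny sketch of the resulting prompt suffix under that assumption (the instruction and prompt strings are illustrative):

```python
from holmes.core.tool_calling_llm import Instructions

gi = Instructions(instructions=["Always include the resource owner in the answer."])

user_prompt = 'My instructions to check \n"""* check pod logs\n"""'
if gi and gi.instructions and len(gi.instructions[0]) > 0:
    # Only the first entry is appended, matching the diff above.
    user_prompt += f"\n\nGlobal Instructions (use only if relevant): {gi.instructions[0]}\n"
```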
11 changes: 11 additions & 0 deletions holmes/plugins/prompts/_global_instructions.jinja2
@@ -0,0 +1,11 @@
Global Instructions
You may receive a set of “Global Instructions” that describe how to perform certain tasks, handle certain situations, or apply certain best practices. They are not mandatory for every request, but serve as a reference resource and must be used if the current scenario or user prompt aligns with one of the described methods or conditions.
Use these rules when deciding how to apply them:

* If the user prompt includes Global Instructions, treat them as a reference resource.
* Some Global Instructions may describe how to handle specific tasks or scenarios. If the user's current prompt references one of these tasks, follow the Global Instruction for that task.
* If some Global Instructions define general conditions (e.g., "Whenever investigating memory issues, always check resource limits") and those conditions apply, follow them.
* If the user's prompt directs you to perform a task (e.g., “Find owner”) and there is a Global Instruction on how to do that task, follow the Global Instruction on how to perform it.
* If multiple Global Instructions are relevant, apply all that fit.
* If no Global Instruction is relevant, or no condition applies, ignore them and proceed as normal.
* Before finalizing your answer, double-check if any Global Instructions apply. If so, ensure you have correctly followed those instructions.
32 changes: 32 additions & 0 deletions holmes/plugins/prompts/generic_ask_conversation.jinja2
@@ -0,0 +1,32 @@
You are a tool-calling AI assistant provided with common devops and IT tools that you can use to troubleshoot problems or answer questions.
Whenever possible you MUST first use tools to investigate then answer the question.
Do not say 'based on the tool output' or explicitly refer to tools at all.
If you output an answer and then realize you need to call more tools or there are possible next steps, you may do so by calling tools at that point in time.
If you have a good and concrete suggestion for how the user can fix something, tell them even if not asked explicitly

Use conversation history to maintain continuity when appropriate, ensuring efficiency in your responses.

{% include '_global_instructions.jinja2' %}

{% include '_general_instructions.jinja2' %}

Style guide:
* Reply with terse output.
* Be painfully concise.
* Leave out "the" and filler words when possible.
* Be terse but not at the expense of leaving out important data like the root cause and how to fix.

Examples:

User: Why did the webserver-example app crash?
(Call tool kubectl_find_resource kind=pod keyword=webserver)
(Call tool kubectl_previous_logs namespace=demos pod=webserver-example-1299492-d9g9d # this pod name was found from the previous tool call)

AI: `webserver-example-1299492-d9g9d` crashed due to email validation error during HTTP request for /api/create_user
Relevant logs:

```
2021-01-01T00:00:00.000Z [ERROR] Missing required field 'email' in request body
```

Validation error led to unhandled Java exception causing a crash.
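For reference, a small sketch of rendering the new template through `load_and_render_prompt`, matching the empty context used in `build_chat_messages` in the conversations.py diff above:

```python
from holmes.plugins.prompts import load_and_render_prompt

# Renders the system prompt for a fresh chat; the two includes pull in the
# global- and general-instruction blocks at render time.
system_prompt = load_and_render_prompt("builtin://generic_ask_conversation.jinja2", {})
```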
@@ -23,6 +23,8 @@ Conversation history:
{{conversation_history}}
{% endif %}

{% include '_global_instructions.jinja2' %}

{% include '_general_instructions.jinja2' %}

Style guide:
13 changes: 13 additions & 0 deletions holmes/plugins/prompts/generic_investigation.jinja2
@@ -6,6 +6,19 @@ Provide a terse analysis of the following {{ issue.source_type }} alert/issue a

If the user provides you with extra instructions in a triple quotes section, ALWAYS perform their instructions and then perform your investigation.


Global Instructions
You may receive a set of “Global Instructions” that describe how to perform certain tasks, handle certain situations, or apply certain best practices. They are not mandatory for every request, but serve as a reference resource and must be used if the current scenario or user request aligns with one of the described methods or conditions.
Use these rules when deciding how to apply them:

* If the user prompt includes Global Instructions, treat them as a reference resource.
* Some Global Instructions may describe how to handle specific tasks or scenarios. If the user's current request or the instructions in a triple quotes section reference one of these tasks, follow the Global Instruction for that task.
* Some Global Instructions may define general conditions that always apply if a certain scenario occurs (e.g., "whenever investigating a memory issue, always check resource limits"). If such a condition matches the current situation, apply the Global Instruction accordingly.
* If the user's prompt or the instructions in a triple quotes section direct you to perform a task (e.g., “Find owner”) and there is a Global Instruction on how to do that task, follow the Global Instruction on how to perform it.
* If multiple Global Instructions are relevant, apply all that fit.
* If no Global Instruction is relevant, or no condition applies, ignore them and proceed as normal.
* Before finalizing your answer double-check if any Global Instructions apply. If so, ensure you have correctly followed those instructions.

{% include '_general_instructions.jinja2' %}

Style Guide:
14 changes: 14 additions & 0 deletions holmes/plugins/prompts/kubernetes_workload_ask.jinja2
@@ -5,6 +5,19 @@ If you output an answer and then realize you need to call more tools or there ar

If the user provides you with extra instructions in a triple single quotes section, ALWAYS perform their instructions and then perform your investigation.


Global Instructions
You may receive a set of “Global Instructions” that describe how to perform certain tasks, handle certain situations, or apply certain best practices. They are not mandatory for every request, but serve as a reference resource and must be used if the current scenario or user request aligns with one of the described methods or conditions.
Use these rules when deciding how to apply them:

* If the user prompt includes Global Instructions, treat them as a reference resource.
* Some Global Instructions may describe how to handle specific tasks or scenarios. If the user's current request or the instructions in a triple quotes section reference one of these tasks, ALWAYS follow the Global Instruction for that task.
* Some Global Instructions may define general conditions that always apply if a certain scenario occurs (e.g., "whenever investigating a memory issue, always check resource limits"). If such a condition matches the current situation, apply the Global Instruction accordingly.
* If the user's prompt or the instructions in a triple quotes section direct you to perform a task (e.g., “Find owner”) and there is a Global Instruction on how to do that task, ALWAYS follow the Global Instruction on how to perform it.
* If multiple Global Instructions are relevant, apply all that fit.
* If no Global Instruction is relevant, or no condition applies, ignore them and proceed as normal.
* Before finalizing your answer double-check if any Global Instructions apply. If so, ensure you have correctly followed those instructions.

In general:
* when it can provide extra information, first run as many tools as you need to gather more information, then respond.
* if possible, do so repeatedly with different tool calls each time to gather more information.
@@ -37,6 +50,7 @@ Style guide:
* Be painfully concise.
* Leave out "the" and filler words when possible.
* Be terse but not at the expense of leaving out important data like the root cause and how to fix.
* if asked by Global Instructions or instructions in a triple single quotes section to explicitly include something in the answer, don't leave it out.
* return a json object with the following schema as a result:
{
"type": "object",
7 changes: 7 additions & 0 deletions holmes/utils/global_instructions.py
@@ -0,0 +1,7 @@
from holmes.core.tool_calling_llm import Instructions


def add_global_instructions_to_user_prompt(user_prompt: str, global_instructions: Instructions) -> str:
if global_instructions and global_instructions.instructions and len(global_instructions.instructions[0]) > 0:
user_prompt += f"\n\nGlobal Instructions (use only if relevant): {global_instructions.instructions[0]}\n"
return user_prompt
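A quick usage sketch for the new helper, assuming the signature above; the ask and instruction strings are illustrative. As the callers in this PR rely on, a `None` argument leaves the prompt unchanged:

```python
from holmes.core.tool_calling_llm import Instructions
from holmes.utils.global_instructions import add_global_instructions_to_user_prompt

ask = "Why is my deployment not scaling?"

# With no instructions configured the prompt comes back unchanged.
assert add_global_instructions_to_user_prompt(ask, None) == ask

# With a stored instruction the prompt gains a clearly labelled suffix.
gi = Instructions(instructions=["Check HPA status before anything else."])
expanded = add_global_instructions_to_user_prompt(ask, gi)
assert "Global Instructions (use only if relevant)" in expanded
```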
15 changes: 12 additions & 3 deletions server.py
@@ -45,6 +45,7 @@
)
from holmes.plugins.prompts import load_and_render_prompt
from holmes.utils.holmes_sync_toolsets import holmes_sync_toolsets_status
from holmes.utils.global_instructions import add_global_instructions_to_user_prompt


def init_logging():
@@ -120,12 +121,16 @@ def workload_health_check(request: WorkloadHealthRequest):
)
if stored_instructions:
instructions.extend(stored_instructions.instructions)

nl = "\n"
if instructions:
request.ask = f"{request.ask}\n My instructions for the investigation '''{nl.join(instructions)}'''"

global_instructions = dal.get_global_instructions_for_account()
request.ask = add_global_instructions_to_user_prompt(request.ask, global_instructions)

system_prompt = load_and_render_prompt(request.prompt_template, context={'alerts': workload_alerts})


ai = config.create_toolcalling_llm(console, dal=dal)

@@ -167,7 +172,9 @@ def issue_conversation(issue_chat_request: IssueChatRequest):
try:
load_robusta_api_key(dal=dal, config=config)
ai = config.create_toolcalling_llm(console, dal=dal)
messages = build_issue_chat_messages(issue_chat_request, ai)
global_instructions = dal.get_global_instructions_for_account()

messages = build_issue_chat_messages(issue_chat_request, ai, global_instructions)
llm_call = ai.messages_call(messages=messages)

return ChatResponse(
@@ -185,8 +192,10 @@ def chat(chat_request: ChatRequest):
load_robusta_api_key(dal=dal, config=config)

ai = config.create_toolcalling_llm(console, dal=dal)
global_instructions = dal.get_global_instructions_for_account()

messages = build_chat_messages(
chat_request.ask, chat_request.conversation_history, ai=ai
chat_request.ask, chat_request.conversation_history, ai=ai, global_instructions=global_instructions
)

llm_call = ai.messages_call(messages=messages)