From d0ad24feeb569007796098bbfa9e96e0f811ed69 Mon Sep 17 00:00:00 2001
From: Ondrej Platek
Date: Wed, 6 Nov 2024 00:43:03 +0100
Subject: [PATCH] Add docstring on how to connect when handling connection
 errors

---
 factgenie/llm_campaign.py |  9 +++++++++
 factgenie/models.py       | 22 ++++++++++++++++++----
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/factgenie/llm_campaign.py b/factgenie/llm_campaign.py
index 3cb3d2fa..26f5f585 100644
--- a/factgenie/llm_campaign.py
+++ b/factgenie/llm_campaign.py
@@ -9,8 +9,11 @@
 import ast
 import logging
 import traceback
+import requests
+import urllib3
 
 from slugify import slugify
+
 from factgenie.campaigns import CampaignMode, CampaignStatus, ExampleStatus
 from flask import jsonify
 import factgenie.utils as utils
@@ -183,6 +186,12 @@ def run_llm_campaign(app, mode, campaign_id, announcer, campaign, datasets, mode
                     res["output"] = generated_output["output"]
                 elif mode == CampaignMode.LLM_GEN:
                     res = model.generate_output(data=example)
+            except requests.exceptions.ConnectionError as e:
+                traceback.print_exc()
+                return utils.error(
+                    f"Error processing example {dataset_id}-{split}-{example_idx}: {e.__class__.__name__}: {str(e)}\n\n{model.new_connection_error_advice_docstring}\n"
+                )
+
             except Exception as e:
                 traceback.print_exc()
                 return utils.error(
diff --git a/factgenie/models.py b/factgenie/models.py
index 518154af..0bf6036e 100644
--- a/factgenie/models.py
+++ b/factgenie/models.py
@@ -3,10 +3,7 @@
 import traceback
 from openai import OpenAI
 from textwrap import dedent
-import argparse
-import yaml
 import json
-import sys
 from pathlib import Path
 import os
 
@@ -20,7 +17,6 @@
 from factgenie.campaigns import CampaignMode
 
 # logging.basicConfig(format="%(message)s", level=logging.INFO, datefmt="%H:%M:%S")
-# coloredlogs.install(level="INFO", fmt="%(asctime)s %(levelname)s %(message)s")
 logger = logging.getLogger(__name__)
 
 DIR_PATH = os.path.dirname(__file__)
@@ -66,6 +62,10 @@ def __init__(self, config):
         # the key in the model output that contains the annotations
         self.annotation_key = config["extra_args"].get("annotation_key", "annotations")
 
+    @property
+    def new_connection_error_advice_docstring(self):
+        return """The call to the LLM API server failed. Please check the LLM engine documentation."""
+
     def get_annotator_id(self):
         return "llm-" + self.config["type"] + "-" + self.config["model"]
 
@@ -233,6 +233,13 @@ def __init__(self, config):
 
         self.set_api_endpoint()
 
+    @property
+    def new_connection_error_advice_docstring(self):
+        return """\
+Please check the Ollama documentation:
+  https://github.com/ollama/ollama?tab=readme-ov-file#generate-a-response
+"""
+
     def set_api_endpoint(self):
         # make sure the API URL ends with the `generate` endpoint
         self.config["api_url"] = self.config["api_url"].rstrip("/")
@@ -457,6 +464,13 @@ def __init__(self, config):
 
         self.set_api_endpoint()
 
+    @property
+    def new_connection_error_advice_docstring(self):
+        return """\
+Please check the Ollama documentation:
+  https://github.com/ollama/ollama?tab=readme-ov-file#generate-a-response
+"""
+
     def set_api_endpoint(self):
         # make sure the API URL ends with the `chat` endpoint
         self.config["api_url"] = self.config["api_url"].rstrip("/")
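
Note for reviewers: below is a minimal, self-contained sketch of the pattern this patch introduces, i.e. how the campaign loop surfaces per-model connection advice when the LLM API server is unreachable. The OllamaStub class, the localhost URL, and the request payload are illustrative assumptions for demonstration only; the real model classes live in factgenie/models.py, and the consuming except clause is the one added to run_llm_campaign above.

    # Sketch only, not part of the patch. Requires the `requests` package.
    import requests

    class OllamaStub:
        @property
        def new_connection_error_advice_docstring(self):
            return (
                "Please check the Ollama documentation:\n"
                "  https://github.com/ollama/ollama?tab=readme-ov-file#generate-a-response"
            )

        def generate_output(self, data):
            # Raises requests.exceptions.ConnectionError when no server is
            # listening on the endpoint (11434 is Ollama's default port).
            resp = requests.post("http://localhost:11434/api/generate", json=data, timeout=60)
            resp.raise_for_status()
            return resp.json()

    model = OllamaStub()
    try:
        model.generate_output({"model": "llama3", "prompt": "Hello", "stream": False})
    except requests.exceptions.ConnectionError as e:
        # Mirrors the new except clause in run_llm_campaign: the raw error
        # first, then the model-specific connection advice.
        print(f"{e.__class__.__name__}: {str(e)}\n\n{model.new_connection_error_advice_docstring}\n")

Exposing the advice as a property on each model class keeps the campaign loop generic: it can print engine-specific hints (Ollama vs. a generic LLM API) without knowing which backend it is talking to.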