use staging for integration tests #1140

Merged
merged 15 commits on Oct 29, 2024
9 changes: 3 additions & 6 deletions .github/actions/js-integration-tests/action.yml
@@ -4,12 +4,9 @@ inputs:
   node-version:
     description: "Node version"
     required: true
-  langchain-api-key:
+  langchain-api-key-beta:
     description: "Langchain"
     required: true
-  langchain-endpoint:
-    description: "LangSmith Endpoint"
-    required: true
   openai-api-key:
     description: "OpenAI API key"
     required: false
@@ -37,6 +34,6 @@ runs:
       working-directory: js
       env:
         LANGCHAIN_TRACING_V2: "true"
-        LANGCHAIN_ENDPOINT: ${{ inputs.langchain-endpoint }}
-        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key }}
+        LANGCHAIN_ENDPOINT: https://beta.api.smith.langchain.com
+        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key-beta }}
         OPENAI_API_KEY: ${{ inputs.openai-api-key }}
20 changes: 10 additions & 10 deletions .github/actions/python-integration-tests/action.yml
@@ -4,11 +4,11 @@ inputs:
   python-version:
     description: "Python version"
     required: true
-  langchain-api-key:
-    description: "Langchain"
+  langchain-api-key-beta:
+    description: "LangSmith Beta Key"
     required: true
-  langchain-endpoint:
-    description: "LangSmith Endpoint"
+  langchain-api-key-prod:
+    description: "LangSmith Key"
     required: true
   openai-api-key:
     description: "OpenAI API key"
@@ -43,8 +43,8 @@ runs:
     - name: Run integration tests
       env:
         LANGCHAIN_TRACING_V2: "true"
-        LANGCHAIN_ENDPOINT: ${{ inputs.langchain-endpoint }}
-        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key }}
+        LANGCHAIN_ENDPOINT: https://beta.api.smith.langchain.com
+        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key-beta }}
         OPENAI_API_KEY: ${{ inputs.openai-api-key }}
       run: make integration_tests_fast
       shell: bash
@@ -53,8 +53,8 @@ runs:
     - name: Run doctest
       env:
         LANGCHAIN_TRACING_V2: "true"
-        LANGCHAIN_ENDPOINT: ${{ inputs.langchain-endpoint }}
-        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key }}
+        LANGCHAIN_ENDPOINT: https://api.smith.langchain.com
+        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key-prod }}
         OPENAI_API_KEY: ${{ inputs.openai-api-key }}
         ANTHROPIC_API_KEY: ${{ inputs.anthropic-api-key }}
       run: make doctest
@@ -65,8 +65,8 @@ runs:
     - name: Run Evaluation
       env:
         LANGCHAIN_TRACING_V2: "true"
-        LANGCHAIN_ENDPOINT: ${{ inputs.langchain-endpoint }}
-        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key }}
+        LANGCHAIN_ENDPOINT: https://beta.api.smith.langchain.com
+        LANGCHAIN_API_KEY: ${{ inputs.langchain-api-key-beta }}
         OPENAI_API_KEY: ${{ inputs.openai-api-key }}
         ANTHROPIC_API_KEY: ${{ inputs.anthropic-api-key }}
       run: make evals
7 changes: 3 additions & 4 deletions .github/workflows/integration_tests.yml
@@ -47,8 +47,8 @@ jobs:
         uses: ./.github/actions/python-integration-tests
         with:
           python-version: 3.11
-          langchain-endpoint: https://api.smith.langchain.com
-          langchain-api-key: ${{ secrets.LANGSMITH_API_KEY }}
+          langchain-api-key-beta: ${{ secrets.LANGSMITH_API_KEY_BETA }}
+          langchain-api-key-prod: ${{ secrets.LANGSMITH_API_KEY_PROD }}
           openai-api-key: ${{ secrets.OPENAI_API_KEY }}
           anthropic-api-key: ${{ secrets.ANTHROPIC_API_KEY }}

@@ -76,6 +76,5 @@ jobs:
         uses: ./.github/actions/js-integration-tests
         with:
           node-version: 20.x
-          langchain-endpoint: https://api.smith.langchain.com
-          langchain-api-key: ${{ secrets.LANGSMITH_API_KEY }}
+          langchain-api-key-beta: ${{ secrets.LANGSMITH_API_KEY_BETA }}
           openai-api-key: ${{ secrets.OPENAI_API_KEY }}
4 changes: 2 additions & 2 deletions js/src/tests/client.int.test.ts
@@ -1112,7 +1112,7 @@ test("Test pull prompt include model", async () => {
test("list shared examples can list shared examples", async () => {
const client = new Client();
const multiverseMathPublicDatasetShareToken =
"620596ee-570b-4d2b-8c8f-f828adbe5242";
"cce9c8a9-761a-4756-b159-58ed2640e274";
const sharedExamples = await client.listSharedExamples(
multiverseMathPublicDatasetShareToken
);
@@ -1123,7 +1123,7 @@ test("clonePublicDataset method can clone a dataset", async () => {
   const client = new Client();
   const datasetName = "multiverse_math_public_testing";
   const multiverseMathPublicDatasetURL =
-    "https://smith.langchain.com/public/620596ee-570b-4d2b-8c8f-f828adbe5242/d";
+    "https://beta.smith.langchain.com/public/cce9c8a9-761a-4756-b159-58ed2640e274/d";

   try {
     await client.clonePublicDataset(multiverseMathPublicDatasetURL, {
2 changes: 1 addition & 1 deletion python/tests/integration_tests/test_async_client.py
@@ -63,7 +63,7 @@ async def wait_for(condition, timeout=10):
 @pytest.fixture
 async def async_client():
     ls_utils.get_env_var.cache_clear()
-    client = AsyncClient(api_url="https://api.smith.langchain.com")
+    client = AsyncClient()
     yield client
     await client.aclose()

8 changes: 3 additions & 5 deletions python/tests/integration_tests/test_client.py
@@ -43,7 +43,7 @@ def wait_for(
 @pytest.fixture
 def langchain_client() -> Client:
     get_env_var.cache_clear()
-    return Client(api_url="https://api.smith.langchain.com")
+    return Client()


 def test_datasets(langchain_client: Client) -> None:
@@ -356,11 +356,9 @@ def test_persist_update_run(langchain_client: Client) -> None:


@pytest.mark.parametrize("uri", ["http://localhost:1981", "http://api.langchain.minus"])
def test_error_surfaced_invalid_uri(monkeypatch: pytest.MonkeyPatch, uri: str) -> None:
def test_error_surfaced_invalid_uri(uri: str) -> None:
get_env_var.cache_clear()
monkeypatch.setenv("LANGCHAIN_ENDPOINT", uri)
monkeypatch.setenv("LANGCHAIN_API_KEY", "test")
client = Client()
client = Client(api_url=uri, api_key="test")
# expect connect error
with pytest.raises(LangSmithConnectionError):
client.create_run("My Run", inputs={"text": "hello world"}, run_type="llm")
2 changes: 1 addition & 1 deletion python/tests/integration_tests/test_llm_evaluator.py
@@ -147,7 +147,7 @@ def test_from_model() -> None:
 async def test_evaluate() -> None:
     client = Client()
     client.clone_public_dataset(
-        "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
+        "https://beta.smith.langchain.com/public/06785303-0f70-4466-b637-f23d38c0f28e/d"
     )
     dataset_name = "Evaluate Examples"

52 changes: 26 additions & 26 deletions python/tests/integration_tests/wrappers/test_openai.py
@@ -12,12 +12,12 @@
 from langsmith.wrappers import wrap_openai


-@mock.patch("langsmith.client.requests.Session")
 @pytest.mark.parametrize("stream", [False, True])
-def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool):
+def test_chat_sync_api(stream: bool):
     import openai  # noqa

-    client = langsmith.Client(session=mock_session())
+    mock_session = mock.MagicMock()
+    client = langsmith.Client(session=mock_session)
     original_client = openai.Client()
     patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client})
     messages = [{"role": "user", "content": "Say 'foo'"}]
@@ -47,16 +47,16 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool):
     assert original.choices == patched.choices
     # Give the thread a chance.
     time.sleep(0.01)
-    for call in mock_session.return_value.request.call_args_list[1:]:
+    for call in mock_session.request.call_args_list[1:]:
         assert call[0][0].upper() == "POST"


-@mock.patch("langsmith.client.requests.Session")
 @pytest.mark.parametrize("stream", [False, True])
-async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool):
+async def test_chat_async_api(stream: bool):
     import openai  # noqa

-    client = langsmith.Client(session=mock_session())
+    mock_session = mock.MagicMock()
+    client = langsmith.Client(session=mock_session)
     original_client = openai.AsyncClient()
     patched_client = wrap_openai(openai.AsyncClient(), tracing_extra={"client": client})
     messages = [{"role": "user", "content": "Say 'foo'"}]
@@ -82,16 +82,16 @@ async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool):
     assert original.choices == patched.choices
     # Give the thread a chance.
     time.sleep(0.1)
-    for call in mock_session.return_value.request.call_args_list[1:]:
+    for call in mock_session.request.call_args_list[1:]:
         assert call[0][0].upper() == "POST"


-@mock.patch("langsmith.client.requests.Session")
 @pytest.mark.parametrize("stream", [False, True])
-def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool):
+def test_completions_sync_api(stream: bool):
     import openai

-    client = langsmith.Client(session=mock_session())
+    mock_session = mock.MagicMock()
+    client = langsmith.Client(session=mock_session)
     original_client = openai.Client()
     patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client})
     prompt = ("Say 'Foo' then stop.",)
@@ -125,16 +125,16 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool):
     assert original.choices == patched.choices
     # Give the thread a chance.
     time.sleep(0.1)
-    for call in mock_session.return_value.request.call_args_list[1:]:
+    for call in mock_session.request.call_args_list[1:]:
         assert call[0][0].upper() == "POST"


-@mock.patch("langsmith.client.requests.Session")
 @pytest.mark.parametrize("stream", [False, True])
-async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool):
+async def test_completions_async_api(stream: bool):
     import openai

-    client = langsmith.Client(session=mock_session())
+    mock_session = mock.MagicMock()
+    client = langsmith.Client(session=mock_session)

     original_client = openai.AsyncClient()
     patched_client = wrap_openai(
@@ -179,10 +179,10 @@ async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool)
     # Give the thread a chance.
     for _ in range(10):
         time.sleep(0.1)
-        if mock_session.return_value.request.call_count >= 1:
+        if mock_session.request.call_count >= 1:
             break
-    assert mock_session.return_value.request.call_count >= 1
-    for call in mock_session.return_value.request.call_args_list[1:]:
+    assert mock_session.request.call_count >= 1
+    for call in mock_session.request.call_args_list[1:]:
         assert call[0][0].upper() == "POST"


@@ -199,7 +199,7 @@ def __call__(self, run):


 def _collect_requests(mock_session: mock.MagicMock, filename: str):
-    mock_requests = mock_session.return_value.request.call_args_list
+    mock_requests = mock_session.request.call_args_list
     collected_requests = {}
     for _ in range(10):
         time.sleep(0.1)
@@ -215,7 +215,7 @@ def _collect_requests(mock_session: mock.MagicMock, filename: str):
         # thread has finished processing the run
         if any(event.get("end_time") for event in all_events):
             break
-    mock_session.return_value.request.call_args_list.clear()
+    mock_session.request.call_args_list.clear()

     if os.environ.get("WRITE_TOKEN_COUNTING_TEST_DATA") == "1":
         dir_path = Path(__file__).resolve().parent.parent / "test_data"
@@ -274,13 +274,13 @@


@pytest.mark.parametrize("test_case", test_cases)
@mock.patch("langsmith.client.requests.Session")
def test_wrap_openai_chat_tokens(mock_session: mock.MagicMock, test_case):
def test_wrap_openai_chat_tokens(test_case):
import openai
from openai.types.chat import ChatCompletion, ChatCompletionChunk

oai_client = openai.Client()
ls_client = langsmith.Client(session=mock_session())
mock_session = mock.MagicMock()
ls_client = langsmith.Client(session=mock_session)
wrapped_oai_client = wrap_openai(oai_client, tracing_extra={"client": ls_client})

collect = Collect()
@@ -323,13 +323,13 @@ def test_wrap_openai_chat_tokens(mock_session: mock.MagicMock, test_case):

 @pytest.mark.asyncio
 @pytest.mark.parametrize("test_case", test_cases)
-@mock.patch("langsmith.client.requests.Session")
-async def test_wrap_openai_chat_async_tokens(mock_session: mock.MagicMock, test_case):
+async def test_wrap_openai_chat_async_tokens(test_case):
     import openai
     from openai.types.chat import ChatCompletion, ChatCompletionChunk

     oai_client = openai.AsyncClient()
-    ls_client = langsmith.Client(session=mock_session())
+    mock_session = mock.MagicMock()
+    ls_client = langsmith.Client(session=mock_session)
     wrapped_oai_client = wrap_openai(oai_client, tracing_extra={"client": ls_client})

     collect = Collect()