Skip to content

Commit 3ded6d5

Browse files
Add tests for prompts call and call_stream (#66)
* Add tests for prompts call and call_stream * fix: Type checking fixes
1 parent 6e1ba26 commit 3ded6d5

File tree

2 files changed

+70
-7
lines changed

2 files changed

+70
-7
lines changed

tests/integration/conftest.py

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
1-
from contextlib import contextmanager, redirect_stdout
2-
from dataclasses import dataclass
3-
import os
4-
from typing import Any, ContextManager, Generator
51
import io
6-
from typing import TextIO
2+
import os
73
import uuid
8-
import pytest
4+
from contextlib import contextmanager, redirect_stdout
5+
from dataclasses import dataclass
6+
from typing import Any, ContextManager, Generator, TextIO
7+
98
import dotenv
9+
import pytest
1010
from humanloop.client import Humanloop
11+
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
1112

1213

1314
@dataclass
@@ -55,7 +56,7 @@ def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]
5556

5657

5758
@pytest.fixture(scope="function")
58-
def test_prompt_config() -> dict[str, Any]:
59+
def test_prompt_config() -> PromptKernelRequestParams:
5960
return {
6061
"provider": "openai",
6162
"model": "gpt-4o-mini",
@@ -119,6 +120,22 @@ def eval_prompt(
119120
pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
120121

121122

123+
@pytest.fixture(scope="function")
def prompt(
    humanloop_test_client: Humanloop,
    sdk_test_dir: str,
    openai_key: str,
    test_prompt_config: PromptKernelRequestParams,
) -> Generator[TestIdentifiers, None, None]:
    """Create a Prompt in the test directory, yield its identifiers, and delete it afterwards.

    Only the upsert is guarded: a failure there aborts the test with a clear
    message. Exceptions raised by the consuming test propagate unchanged, and
    the created Prompt is always deleted via ``finally``.
    """
    prompt_path = f"{sdk_test_dir}/prompt"
    try:
        response = humanloop_test_client.prompts.upsert(
            path=prompt_path,
            **test_prompt_config,
        )
    except Exception as e:
        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
    try:
        yield TestIdentifiers(file_id=response.id, file_path=response.path)
    finally:
        # Clean up even when the test using this fixture fails.
        humanloop_test_client.prompts.delete(id=response.id)
137+
138+
122139
@pytest.fixture(scope="function")
123140
def output_not_null_evaluator(
124141
humanloop_test_client: Humanloop, sdk_test_dir: str

tests/integration/test_prompts.py

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
from humanloop.client import Humanloop
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams

from tests.integration.conftest import TestIdentifiers
4+
5+
6+
def test_prompts_call(
    humanloop_test_client: Humanloop,
    prompt: TestIdentifiers,
    test_prompt_config: PromptKernelRequestParams,
) -> None:
    """Calling the fixture Prompt by path returns non-empty logs whose output mentions Paris."""
    response = humanloop_test_client.prompts.call(  # type: ignore [attr-defined]
        path=prompt.file_path,
        # Shallow-copy the shared fixture config so the call cannot mutate it.
        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
        inputs={"question": "What is the capital of the France?"},
    )
    assert response is not None
    assert response.log_id is not None
    assert response.logs is not None
    for log in response.logs:
        assert log is not None
        assert log.output is not None
        assert "Paris" in log.output
    # The response must reference the Prompt created by the fixture.
    assert response.prompt.path == prompt.file_path
24+
25+
26+
def test_prompts_call_stream(
    humanloop_test_client: Humanloop,
    prompt: TestIdentifiers,
    test_prompt_config: PromptKernelRequestParams,
) -> None:
    """Streaming a Prompt call yields well-formed chunks whose concatenated output mentions Paris."""
    response = humanloop_test_client.prompts.call_stream(  # type: ignore [attr-defined]
        path=prompt.file_path,
        # Shallow-copy the shared fixture config so the call cannot mutate it.
        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
        inputs={"question": "What is the capital of the France?"},
    )

    output = ""
    for chunk in response:
        # Every chunk must carry identifiers tying it back to the Prompt version.
        assert chunk is not None
        assert chunk.output is not None
        assert chunk.id is not None
        assert chunk.prompt_id is not None
        assert chunk.version_id is not None
        output += chunk.output

    assert "Paris" in output

0 commit comments

Comments
 (0)