-from contextlib import contextmanager, redirect_stdout
-from dataclasses import dataclass
-import os
-from typing import Any, ContextManager, Generator
 import io
-from typing import TextIO
+import os
 import uuid
-import pytest
+from contextlib import contextmanager, redirect_stdout
+from dataclasses import dataclass
+from typing import Any, ContextManager, Generator, TextIO
+
 import dotenv
+import pytest
 from humanloop.client import Humanloop
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
 
 
 @dataclass
@@ -55,7 +56,8 @@ def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]
 
 
 @pytest.fixture(scope="function")
-def test_prompt_config() -> dict[str, Any]:
+def test_prompt_config() -> PromptKernelRequestParams:
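+    # Typed prompt config; the prompt fixture below unpacks this into prompts.upsert().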
     return {
         "provider": "openai",
         "model": "gpt-4o-mini",
@@ -119,6 +120,24 @@ def eval_prompt(
         pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
 
 
+@pytest.fixture(scope="function")
+def prompt(
+    humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: PromptKernelRequestParams
+) -> Generator[TestIdentifiers, None, None]:
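+    # Create a Prompt for this test and delete it during teardown.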
+    prompt_path = f"{sdk_test_dir}/prompt"
+    try:
+        response = humanloop_test_client.prompts.upsert(
+            path=prompt_path,
+            **test_prompt_config,
+        )
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+    yield TestIdentifiers(file_id=response.id, file_path=response.path)
+    # Teardown: clean up the prompt once the test has finished.
+    humanloop_test_client.prompts.delete(id=response.id)
+
+
 @pytest.fixture(scope="function")
 def output_not_null_evaluator(
     humanloop_test_client: Humanloop, sdk_test_dir: str