
Commit 2455724

Author: Andrei Bratu (committed)
Error handling in decorators
1 parent 02e1ed4 · commit 2455724

File tree: 8 files changed, +106 -40 lines changed


src/humanloop/context.py

Lines changed: 12 additions & 11 deletions
@@ -1,12 +1,12 @@
 from contextlib import contextmanager
 from dataclasses import dataclass
 import threading
-from typing import Callable, Generator, Optional
+from typing import Any, Callable, Generator, Literal, Optional
 from opentelemetry import context as context_api

 from humanloop.otel.constants import (
     HUMANLOOP_CONTEXT_EVALUATION,
-    HUMANLOOP_CONTEXT_PROMPT,
+    HUMANLOOP_CONTEXT_DECORATOR,
     HUMANLOOP_CONTEXT_TRACE_ID,
 )

@@ -25,14 +25,15 @@ def set_trace_id(flow_log_id: str) -> Generator[None, None, None]:


 @dataclass
-class PromptContext:
+class DecoratorContext:
     path: str
-    template: Optional[str]
+    type: Literal["prompt", "tool", "flow"]
+    version: dict[str, Optional[Any]]


 @contextmanager
-def set_prompt_context(prompt_context: PromptContext) -> Generator[None, None, None]:
-    key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident()))
+def set_decorator_context(prompt_context: DecoratorContext) -> Generator[None, None, None]:
+    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
     reset_token = context_api.attach(
         context_api.set_value(
             key=key,
@@ -43,15 +44,15 @@ def set_prompt_context(prompt_context: PromptContext) -> Generator[None, None, N
     context_api.detach(token=reset_token)


-def get_prompt_context() -> Optional[PromptContext]:
-    key = hash((HUMANLOOP_CONTEXT_PROMPT, threading.get_ident()))
+def get_decorator_context() -> Optional[DecoratorContext]:
+    key = hash((HUMANLOOP_CONTEXT_DECORATOR, threading.get_ident()))
     return context_api.get_value(key)


 class EvaluationContext:
     source_datapoint_id: str
     run_id: str
-    callback: Callable[[str], None]
+    logging_callback: Callable[[str], None]
     file_id: str
     path: str
     logging_counter: int
@@ -60,13 +61,13 @@ def __init__(
         self,
         source_datapoint_id: str,
         run_id: str,
-        callback: Callable[[str], None],
+        logging_callback: Callable[[str], None],
         file_id: str,
         path: str,
     ):
         self.source_datapoint_id = source_datapoint_id
         self.run_id = run_id
-        self.callback = callback
+        self.logging_callback = logging_callback
         self.file_id = file_id
         self.path = path
         self.logging_counter = 0

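For orientation, here is a minimal usage sketch of the generalized decorator context above. The decorator body and the version payload are illustrative assumptions, not code from this commit:

# Hypothetical sketch: a decorator publishes its File path, type and version
# through the thread-scoped decorator context, and instrumentation code reads
# it back with get_decorator_context().
from humanloop.context import DecoratorContext, get_decorator_context, set_decorator_context

def tool_decorator(path: str):
    def decorator(func):
        def wrapper(*args, **kwargs):
            with set_decorator_context(
                DecoratorContext(
                    path=path,
                    type="tool",
                    version={"source_code": None},  # assumed payload shape
                )
            ):
                return func(*args, **kwargs)
        return wrapper
    return decorator

@tool_decorator(path="demo/my_tool")
def my_tool(query: str) -> str:
    context = get_decorator_context()  # DecoratorContext with path="demo/my_tool"
    return f"{context.path} handled {query}"
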
src/humanloop/eval_utils/run.py

Lines changed: 58 additions & 6 deletions
@@ -67,9 +67,15 @@
 from humanloop.types.datapoint_response import DatapointResponse
 from humanloop.types.dataset_response import DatasetResponse
 from humanloop.types.evaluation_run_response import EvaluationRunResponse
+from humanloop.types.evaluator_log_response import EvaluatorLogResponse
+from humanloop.types.flow_log_response import FlowLogResponse
+from humanloop.types.log_response import LogResponse
+from humanloop.types.prompt_log_response import PromptLogResponse
 from humanloop.types.run_stats_response import RunStatsResponse
 from pydantic import ValidationError

+from humanloop.types.tool_log_response import ToolLogResponse
+
 if typing.TYPE_CHECKING:
     from humanloop.client import BaseHumanloop

@@ -99,11 +105,13 @@
 CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)


-class HumanloopUtilityError(Exception):
-    def __init__(self, message):
+class HumanloopDecoratorError(Exception):
+    def __init__(self, message: Optional[str] = None):
         self.message = message

     def __str__(self):
+        if self.message is None:
+            return super().__str__()
         return self.message


@@ -202,7 +210,7 @@ def upload_callback(log_id: str):
         with set_evaluation_context(
             EvaluationContext(
                 source_datapoint_id=dp.id,
-                callback=upload_callback,
+                logging_callback=upload_callback,
                 file_id=hl_file.id,
                 run_id=run.id,
                 path=hl_file.path,
@@ -219,17 +227,25 @@ def upload_callback(log_id: str):
         try:
             output = _call_function(function_, hl_file.type, dp)
             evaluation_context = get_evaluation_context()
-            if not evaluation_context.logging_counter == 0:
+            if evaluation_context is None:
+                raise HumanloopDecoratorError(
+                    "Internal error: evaluation context is not set while processing a datapoint."
+                )
+            if evaluation_context.logging_counter == 0:
                 # function_ did not Log against the source_datapoint_id/ run_id pair
                 # so we need to create a Log
-                log_func(
+                log = log_func(
                     inputs=dp.inputs,
                     output=output,
                     start_time=start_time,
                     end_time=datetime.now(),
                     source_datapoint_id=dp.id,
                     run_id=run.id,
                 )
+                evaluation_context.logging_counter += 1
+                evaluation_context.logging_callback(log.id)
+        except HumanloopDecoratorError as e:
+            raise e
         except Exception as e:
             log_func(
                 inputs=dp.inputs,
@@ -648,13 +664,49 @@ def _call_function(
     return output


+def _get_log_func(
+    client: "BaseHumanloop",
+    file_type: Literal["flow"],
+    file_id: str,
+    version_id: str,
+    run_id: str,
+) -> Callable[..., FlowLogResponse]: ...
+
+
+def _get_log_func(
+    client: "BaseHumanloop",
+    file_type: Literal["prompt"],
+    file_id: str,
+    version_id: str,
+    run_id: str,
+) -> Callable[..., PromptLogResponse]: ...
+
+
+def _get_log_func(
+    client: "BaseHumanloop",
+    file_type: Literal["tool"],
+    file_id: str,
+    version_id: str,
+    run_id: str,
+) -> Callable[..., ToolLogResponse]: ...
+
+
+def _get_log_func(
+    client: "BaseHumanloop",
+    file_type: Literal["evaluator"],
+    file_id: str,
+    version_id: str,
+    run_id: str,
+) -> Callable[..., EvaluatorLogResponse]: ...
+
+
 def _get_log_func(
     client: "BaseHumanloop",
     file_type: FileType,
     file_id: str,
     version_id: str,
     run_id: str,
-) -> Callable:
+) -> Callable[..., LogResponse]:
     """Returns the appropriate log function pre-filled with common parameters."""
     log_request = {
         # TODO: why does the Log `id` field refer to the file ID in the API?

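The four stacked `_get_log_func` signatures above read as type-checker overload stubs for the single implementation that follows. Below is a sketch of the standard `typing.overload` pattern they correspond to; the `@typing.overload` markers, the simplified signatures, and the dummy response classes are assumptions for illustration, since the diff does not show them:

# Sketch of the standard typing.overload pattern (simplified signatures,
# dummy response classes). Only the last, undecorated definition exists at
# runtime; the decorated stubs narrow the return type for mypy/pyright.
import typing
from typing import Callable, Literal

class LogResponse: ...
class FlowLogResponse(LogResponse): ...
class PromptLogResponse(LogResponse): ...

@typing.overload
def get_log_func(file_type: Literal["flow"]) -> Callable[..., FlowLogResponse]: ...
@typing.overload
def get_log_func(file_type: Literal["prompt"]) -> Callable[..., PromptLogResponse]: ...
def get_log_func(file_type: str) -> Callable[..., LogResponse]:
    def log(**kwargs) -> LogResponse:
        return FlowLogResponse() if file_type == "flow" else PromptLogResponse()
    return log

flow_log_func = get_log_func("flow")  # inferred as Callable[..., FlowLogResponse]
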
src/humanloop/otel/constants.py

Lines changed: 1 addition & 1 deletion
@@ -6,6 +6,6 @@
 HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type"
 HUMANLOOP_PATH_KEY = "humanloop.file.path"
 # Opentelemetry context
-HUMANLOOP_CONTEXT_PROMPT = "humanloop.context.prompt"
+HUMANLOOP_CONTEXT_DECORATOR = "humanloop.context.decorator"
 HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id"
 HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation"

src/humanloop/otel/exporter.py

Lines changed: 1 addition & 1 deletion
@@ -201,6 +201,6 @@ def _do_work(self):
             else:
                 if evaluation_context and file_path == evaluation_context.path:
                     log_id = response.json()["records"][0]["log_id"]
-                    evaluation_context.callback(log_id)
+                    evaluation_context.logging_callback(log_id)

         self._upload_queue.task_done()

src/humanloop/overload.py

Lines changed: 27 additions & 13 deletions
@@ -1,12 +1,16 @@
+import inspect
 import logging
 import types
 from typing import TypeVar, Union
 import typing

-from humanloop.context import get_trace_id
-from humanloop.eval_utils.run import HumanloopUtilityError
+from humanloop.context import get_decorator_context, get_trace_id
+from humanloop.eval_utils.run import HumanloopDecoratorError

+from humanloop.evaluators.client import EvaluatorsClient
+from humanloop.flows.client import FlowsClient
 from humanloop.prompts.client import PromptsClient
+from humanloop.tools.client import ToolsClient
 from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
 from humanloop.types.create_flow_log_response import CreateFlowLogResponse
 from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
@@ -16,7 +20,7 @@
 logger = logging.getLogger("humanloop.sdk")


-CLIENT_TYPE = TypeVar("CLIENT_TYPE")
+CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient, EvaluatorsClient, ToolsClient)


 def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
@@ -41,20 +45,29 @@ def _overload_log(
         CreateEvaluatorLogResponse,
     ]:
         trace_id = get_trace_id()
+        if trace_id is not None and type(client) is FlowsClient:
+            context = get_decorator_context()
+            if context is None:
+                raise HumanloopDecoratorError("Internal error: trace_id context is set outside a decorator context.")
+            raise HumanloopDecoratorError(
+                f"Using flows.log() in this context is not allowed at line {inspect.currentframe().f_lineno}: "
+                f"Flow decorator for File {context.path} manages the tracing and trace completion."
+            )
         if trace_id is not None:
             if "trace_parent_id" in kwargs:
-                # TODO: revisit
-                logger.warning("Overriding trace_parent_id argument")
+                logger.warning(
+                    "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
+                    inspect.currentframe().f_lineno,
+                )
             kwargs = {
                 **kwargs,
                 "trace_parent_id": trace_id,
             }
         try:
             response = self._log(**kwargs)
         except Exception as e:
-            # TODO handle
-            # TODO: Bug found in backend: not specifying a model 400s but creates a File
-            raise HumanloopUtilityError(message=str(e)) from e
+            # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+            raise HumanloopDecoratorError from e

         return response

@@ -73,8 +86,10 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
         trace_id = get_trace_id()
         if trace_id is not None:
             if "trace_parent_id" in kwargs:
-                # TODO: revisit
-                logger.warning("Overriding trace_parent_id argument")
+                logger.warning(
+                    "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
+                    inspect.currentframe().f_lineno,
+                )
             kwargs = {
                 **kwargs,
                 "trace_parent_id": trace_id,
@@ -84,9 +99,8 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
             response = self._call(**kwargs)
             response = typing.cast(PromptCallResponse, response)
         except Exception as e:
-            # TODO handle
-            # TODO: Bug found in backend: not specifying a model 400s but creates a File
-            raise HumanloopUtilityError(message=str(e)) from e
+            # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+            raise HumanloopDecoratorError from e

         return response

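The net effect of the new guard in `_overload_log` is that manual `flows.log()` calls are rejected while a Flow decorator is managing the trace. A hedged sketch of what that looks like from user code follows; the client construction and the `hl.flow(...)` decorator entry point are assumptions for illustration:

# Assumed setup for illustration only: an authenticated client and the flow
# decorator exposed on it. Inside the decorated function, the overloaded
# flows.log() raises instead of competing with the decorator's trace.
from humanloop.client import Humanloop
from humanloop.eval_utils.run import HumanloopDecoratorError

hl = Humanloop(api_key="...")  # assumed client construction

@hl.flow(path="demo/my_flow")  # assumed decorator entry point
def my_flow(question: str) -> str:
    hl.flows.log(path="demo/my_flow")  # rejected: the decorator owns the trace
    return "done"

try:
    my_flow("hello")
except HumanloopDecoratorError as err:
    print(err)  # "Using flows.log() in this context is not allowed at line ..."
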
src/humanloop/utilities/flow.py

Lines changed: 1 addition & 2 deletions
@@ -4,11 +4,10 @@
 from typing_extensions import ParamSpec

 from opentelemetry.trace import Span, Tracer
-from opentelemetry import context as context_api
 import requests

 from humanloop.base_client import BaseHumanloop
-from humanloop.context import get_evaluation_context, get_trace_id, set_trace_id
+from humanloop.context import get_trace_id, set_trace_id
 from humanloop.types.chat_message import ChatMessage
 from humanloop.utilities.helpers import bind_args
 from humanloop.eval_utils.types import File

src/humanloop/utilities/prompt.py

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@

 from typing import Callable, Optional

-from humanloop.context import PromptContext, set_prompt_context
+from humanloop.context import PromptContext, set_decorator_context

 logger = logging.getLogger("humanloop.sdk")

@@ -13,7 +13,7 @@ def prompt_decorator_factory(path: str, template: Optional[str]):
     def decorator(func: Callable):
         @wraps(func)
         def wrapper(*args, **kwargs):
-            with set_prompt_context(
+            with set_decorator_context(
                 PromptContext(
                     path=path,
                     template=template,

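For completeness, the factory touched above is the one exercised by the tests that follow; a usage sketch under the signature shown in this hunk (the path and template values are illustrative):

# Usage sketch for prompt_decorator_factory as it appears in this hunk
# (path and template are illustrative values, not taken from the commit).
from humanloop.utilities.prompt import prompt_decorator_factory

@prompt_decorator_factory(path="demo/answer", template="Answer the question: {{question}}")
def answer(question: str) -> str:
    # The wrapped call now runs inside set_decorator_context(...), so
    # downstream instrumentation can resolve the Prompt's path.
    return f"Answering: {question}"
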
tests/utilities/test_prompt.py

Lines changed: 4 additions & 4 deletions
@@ -13,7 +13,7 @@
 from groq import Groq
 from groq import NotFoundError as GroqNotFoundError
 from humanloop.client import Humanloop
-from humanloop.eval_utils.run import HumanloopUtilityError
+from humanloop.eval_utils.run import HumanloopDecoratorError
 from humanloop.utilities.prompt import prompt_decorator_factory
 from humanloop.otel.constants import HUMANLOOP_FILE_KEY
 from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
@@ -449,7 +449,7 @@ def call_llm_with_hl_call():
         )
         return response.logs[0].output_message.content  # type: ignore [union-attr]

-    with pytest.raises(HumanloopUtilityError):
+    with pytest.raises(HumanloopDecoratorError):
        call_llm_with_hl_call()

     response = humanloop_client.directories.get(id=test_directory.id)
@@ -481,7 +481,7 @@ def call_llm_with_hl_call():

         return response.logs[0].output_message.content

-    with pytest.raises(HumanloopUtilityError):
+    with pytest.raises(HumanloopDecoratorError):
        call_llm_with_hl_call()

     response = humanloop_client.directories.get(id=test_directory.id)
@@ -532,7 +532,7 @@ def call_llm_with_hl_call():

         return response.logs[0].output_message.content

-    with pytest.raises(HumanloopUtilityError):
+    with pytest.raises(HumanloopDecoratorError):
        call_llm_with_hl_call()

     response = humanloop_client.directories.get(id=test_directory.id)
