Skip to content

Commit ee10e5a

Browse files
author
Andrei Bratu
committed
Use /otel endpoint to export spans
1 parent ab78ce2 commit ee10e5a

File tree

12 files changed

+142
-96
lines changed

12 files changed

+142
-96
lines changed

src/humanloop/client.py

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,16 @@
88
from opentelemetry.sdk.trace import TracerProvider
99
from opentelemetry.trace import Tracer
1010

11+
from humanloop.context import PromptContext, reset_prompt_context, set_prompt_context
1112
from humanloop.core.client_wrapper import SyncClientWrapper
12-
from humanloop.eval_utils.run import prompt_call_evaluation_aware
1313

14-
from humanloop.eval_utils import log_with_evaluation_context, run_eval
14+
from humanloop.eval_utils import run_eval
1515
from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
1616

1717
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
18+
from humanloop.overload import overload_call, overload_log
1819
from humanloop.utilities.flow import flow as flow_decorator_factory
19-
from humanloop.utilities.prompt import prompt as prompt_decorator_factory
20+
from humanloop.utilities.prompt import prompt
2021
from humanloop.utilities.tool import tool as tool_decorator_factory
2122
from humanloop.environment import HumanloopEnvironment
2223
from humanloop.evaluations.client import EvaluationsClient
@@ -25,8 +26,6 @@
2526
from humanloop.otel.processor import HumanloopSpanProcessor
2627
from humanloop.prompt_utils import populate_template
2728
from humanloop.prompts.client import PromptsClient
28-
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
29-
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
3029

3130

3231
class ExtendedEvalsClient(EvaluationsClient):
@@ -119,9 +118,10 @@ def __init__(
119118
self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
120119

121120
# Overload the .log method of the clients to be aware of Evaluation Context
122-
self.prompts = log_with_evaluation_context(client=self.prompts)
123-
self.prompts = prompt_call_evaluation_aware(client=self.prompts)
124-
self.flows = log_with_evaluation_context(client=self.flows)
121+
self.prompts = overload_log(client=self.prompts)
122+
self.prompts = overload_call(client=self.prompts)
123+
self.flows = overload_log(client=self.flows)
124+
self.tools = overload_log(client=self.tools)
125125

126126
if opentelemetry_tracer_provider is not None:
127127
self._tracer_provider = opentelemetry_tracer_provider
@@ -135,10 +135,7 @@ def __init__(
135135
)
136136
instrument_provider(provider=self._tracer_provider)
137137
self._tracer_provider.add_span_processor(
138-
HumanloopSpanProcessor(
139-
client=self,
140-
exporter=HumanloopSpanExporter(client=self),
141-
),
138+
HumanloopSpanProcessor(exporter=HumanloopSpanExporter(client=self)),
142139
)
143140

144141
if opentelemetry_tracer is None:
@@ -151,6 +148,7 @@ def prompt(
151148
self,
152149
*,
153150
path: str,
151+
template: Optional[str] = None,
154152
):
155153
"""Decorator for declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code.
156154
@@ -225,14 +223,16 @@ def call_llm(messages):
225223
226224
:param prompt_kernel: Attributes that define the Prompt. See `class:DecoratorPromptKernelRequestParams`
227225
"""
228-
return prompt_decorator_factory(path=path)
226+
227+
with prompt(path=path, template=template):
228+
yield
229229

230230
def tool(
231231
self,
232232
*,
233233
path: str,
234-
attributes: dict[str, Any] | None = None,
235-
setup_values: dict[str, Any] | None = None,
234+
attributes: Optional[dict[str, Any]] = None,
235+
setup_values: Optional[dict[str, Any]] = None,
236236
):
237237
"""Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code.
238238
@@ -312,8 +312,8 @@ def calculator(a: int, b: Optional[int]) -> int:
312312
def flow(
313313
self,
314314
*,
315-
path: str = None,
316-
attributes: dict[str, Any] | None = None,
315+
path: str,
316+
attributes: Optional[dict[str, Any]] = None,
317317
):
318318
"""Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
319319
@@ -366,6 +366,7 @@ def entrypoint():
366366
:param flow_kernel: Attributes that define the Flow. See `class:ToolKernelRequestParams`
367367
"""
368368
return flow_decorator_factory(
369+
client=self,
369370
opentelemetry_tracer=self._opentelemetry_tracer,
370371
path=path,
371372
attributes=attributes,

src/humanloop/context.py

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515

1616
def get_trace_id() -> Optional[str]:
1717
key = hash((HUMANLOOP_CONTEXT_TRACE_ID, threading.get_ident()))
18-
context_api.get_value()
1918
return context_api.get_value(key=key)
2019

2120

@@ -28,16 +27,27 @@ def reset_trace_id_context(token: ResetToken):
2827
context_api.detach(token=token)
2928

3029

31-
def set_prompt_path(path: str) -> ResetToken:
30+
@dataclass
31+
class PromptContext:
32+
path: str
33+
template: Optional[str]
34+
35+
36+
def set_prompt_context(prompt_context: PromptContext) -> ResetToken:
3237
key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident()))
33-
return context_api.set_value(key=key, value=path)
38+
return context_api.attach(
39+
context_api.set_value(
40+
key=key,
41+
value=prompt_context,
42+
)
43+
)
3444

3545

36-
def reset_prompt_path(token: ResetToken):
46+
def reset_prompt_context(token: ResetToken):
3747
context_api.detach(token=token)
3848

3949

40-
def get_prompt_path() -> Optional[str]:
50+
def get_prompt_context() -> Optional[PromptContext]:
4151
key = hash((HUMANLOOP_CONTEXT_PROMPT_PATH, threading.get_ident()))
4252
return context_api.get_value(key)
4353

@@ -51,9 +61,9 @@ class EvaluationContext:
5161
path: str
5262

5363

54-
def set_evaluation_context(evaluation_context: EvaluationContext):
64+
def set_evaluation_context(evaluation_context: EvaluationContext) -> ResetToken:
5565
key = hash((HUMANLOOP_CONTEXT_EVALUATION, threading.get_ident()))
56-
context_api.set_value(key, evaluation_context)
66+
return context_api.attach(context_api.set_value(key, evaluation_context))
5767

5868

5969
def get_evaluation_context() -> Optional[EvaluationContext]:
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from .run import log_with_evaluation_context, run_eval
1+
from .run import run_eval
22
from .types import File
33

4-
__all__ = ["run_eval", "log_with_evaluation_context", "File"]
4+
__all__ = ["run_eval", "File"]

src/humanloop/eval_utils/run.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ def upload_callback(log_id: str):
166166
set_evaluation_context(
167167
EvaluationContext(
168168
source_datapoint_id=dp.id,
169-
upload_callback=upload_callback,
169+
callback=upload_callback,
170170
file_id=hl_file.id,
171171
run_id=run.id,
172172
path=hl_file.path,
@@ -741,32 +741,32 @@ def _run_local_evaluators(
741741
else:
742742
log_dict = log
743743
datapoint_dict = datapoint.dict() if datapoint else None
744-
for local_evaluator, eval_function in local_evaluators:
744+
for local_evaluator in local_evaluators:
745745
start_time = datetime.now()
746746
try:
747-
if local_evaluator.spec.arguments_type == "target_required":
748-
judgement = eval_function(
747+
if local_evaluator.hl_evaluator.spec.arguments_type == "target_required":
748+
judgement = local_evaluator.function(
749749
log_dict,
750750
datapoint_dict,
751751
)
752752
else:
753-
judgement = eval_function(log_dict)
753+
judgement = local_evaluator.function(log_dict)
754754

755755
_ = client.evaluators.log(
756-
version_id=local_evaluator.version_id,
756+
version_id=local_evaluator.hl_evaluator.version_id,
757757
parent_id=log_id,
758758
judgment=judgement,
759-
id=local_evaluator.id,
759+
id=local_evaluator.hl_evaluator.id,
760760
start_time=start_time,
761761
end_time=datetime.now(),
762762
)
763763
except Exception as e:
764764
_ = client.evaluators.log(
765765
parent_id=log_id,
766-
id=local_evaluator.id,
766+
id=local_evaluator.hl_evaluator.id,
767767
error=str(e),
768768
start_time=start_time,
769769
end_time=datetime.now(),
770770
)
771-
logger.warning(f"\nEvaluator {local_evaluator.path} failed with error {str(e)}")
771+
logger.warning(f"\nEvaluator {local_evaluator.hl_evaluator.path} failed with error {str(e)}")
772772
progress_bar.increment()

src/humanloop/otel/constants.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,7 @@
55
HUMANLOOP_LOG_ID_KEY = "humanloop.log_id"
66
HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type"
77
HUMANLOOP_PATH_KEY = "humanloop.file.path"
8-
# Required for the exporter to know when to mark the Flow Log as complete
9-
HUMANLOOP_FLOW_PREREQUISITES_KEY = "humanloop.flow.prerequisites"
10-
HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME = "humanloop_intercepted_hl_call"
11-
HUMANLOOP_INTERCEPTED_PROMPT_CALL_RESPONSE = "intercepted_call_response"
8+
# Opentelemetry context
129
HUMANLOOP_CONTEXT_PROMPT_PATH = "humanloop.context.prompt.path"
1310
HUMANLOOP_CONTEXT_TRACE_ID = "humanloop.context.flow.trace_id"
1411
HUMANLOOP_CONTEXT_EVALUATION = "humanloop.context.evaluation"

src/humanloop/otel/exporter.py

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
from opentelemetry.sdk.trace import ReadableSpan
1010
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
1111

12+
import requests
1213
from humanloop.context import get_evaluation_context, EvaluationContext
1314
from humanloop.otel.constants import (
1415
HUMANLOOP_FILE_TYPE_KEY,
@@ -156,8 +157,17 @@ def _do_work(self):
156157
value=log_args,
157158
)
158159

159-
id = self._client.export(span_to_export.to_json())
160-
if evaluation_context:
161-
evaluation_context.callback(id)
160+
response = requests.post(
161+
f"{self._client._client_wrapper.get_base_url()}/import/otel",
162+
headers=self._client._client_wrapper.get_headers(),
163+
data=span_to_export.to_json().encode("ascii"),
164+
)
165+
if response.status_code != 200:
166+
# TODO: handle
167+
pass
168+
else:
169+
if evaluation_context and span_file_path == evaluation_context.path:
170+
log_id = response.json()["log_id"]
171+
evaluation_context.callback(log_id)
162172

163173
self._upload_queue.task_done()

src/humanloop/otel/helpers.py

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
from opentelemetry.trace import SpanKind
66
from opentelemetry.util.types import AttributeValue
77

8-
from humanloop.otel.constants import HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME
98

109
NestedDict = dict[str, Union["NestedDict", AttributeValue]]
1110
NestedList = list[Union["NestedList", NestedDict]]
@@ -178,12 +177,6 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
178177
# Remove the key prefix and the first dot
179178
to_process.append((span_key, span_value))
180179

181-
if not to_process:
182-
if key == "":
183-
# Empty span attributes
184-
return result
185-
raise KeyError(f"Key {key} not found in span attributes")
186-
187180
for span_key, span_value in to_process: # type: ignore
188181
parts = span_key.split(".")
189182
len_parts = len(parts)
@@ -264,10 +257,6 @@ def is_llm_provider_call(span: ReadableSpan) -> bool:
264257
)
265258

266259

267-
def is_intercepted_call(span: ReadableSpan) -> bool:
268-
return span.name == HUMANLOOP_INTERCEPTED_PROMPT_CALL_SPAN_NAME
269-
270-
271260
def is_humanloop_span(span: ReadableSpan) -> bool:
272261
"""Check if the Span was created by the Humanloop SDK."""
273262
return span.name.startswith("humanloop.")
@@ -285,7 +274,7 @@ def module_is_installed(module_name: str) -> bool:
285274
return True
286275

287276

288-
def jsonify_if_not_string(func: Callable, output: Any) -> str:
277+
def process_output(func: Callable, output: Any) -> str:
289278
if not isinstance(output, str):
290279
try:
291280
output = json.dumps(output)

src/humanloop/otel/processor.py

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,11 @@
55
from opentelemetry.sdk.trace import ReadableSpan, Span
66
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
77

8-
from humanloop.context import get_prompt_path
8+
from humanloop.context import get_prompt_context, get_trace_id
99
from humanloop.otel.constants import (
10+
HUMANLOOP_FILE_KEY,
11+
HUMANLOOP_FILE_TYPE_KEY,
12+
HUMANLOOP_LOG_KEY,
1013
HUMANLOOP_PATH_KEY,
1114
)
1215
from humanloop.otel.helpers import is_llm_provider_call
@@ -39,10 +42,21 @@ class HumanloopSpanProcessor(SimpleSpanProcessor):
3942
def __init__(self, exporter: SpanExporter) -> None:
4043
super().__init__(exporter)
4144

42-
def on_start(self, span: Span):
45+
def on_start(self, span: Span, parent_context):
4346
if is_llm_provider_call(span):
44-
prompt_path = get_prompt_path()
45-
if prompt_path:
46-
span.set_attribute(HUMANLOOP_PATH_KEY, prompt_path)
47+
context = get_prompt_context()
48+
prompt_path, prompt_template = context.path, context.template
49+
if context:
50+
span.set_attribute(HUMANLOOP_PATH_KEY, context.path)
51+
span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt")
52+
if prompt_template:
53+
span.set_attribute(
54+
f"{HUMANLOOP_FILE_KEY}.template",
55+
prompt_template,
56+
)
4757
else:
48-
raise ValueError("Provider call outside @prompt context manager")
58+
raise ValueError(f"Provider call outside @prompt context manager: {prompt_path}")
59+
trace_id = get_trace_id()
60+
if trace_id:
61+
span.set_attribute(f"{HUMANLOOP_LOG_KEY}.trace_parent_id", trace_id)
62+
print(span)

src/humanloop/overload.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
logger = logging.getLogger("humanloop.sdk")
1818

1919

20-
CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient)
20+
CLIENT_TYPE = TypeVar("CLIENT_TYPE")
2121

2222

2323
def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
@@ -51,8 +51,7 @@ def _overload_log(
5151
"trace_parent_id": trace_id,
5252
}
5353
try:
54-
response = self._call(**kwargs)
55-
response = typing.cast(PromptCallResponse, response)
54+
response = self._log(**kwargs)
5655
except Exception as e:
5756
# TODO handle
5857
# TODO: Bug found in backend: not specifying a model 400s but creates a File
@@ -67,7 +66,7 @@ def _overload_log(
6766
return client
6867

6968

70-
def overload_prompt_call(client: PromptsClient) -> PromptsClient:
69+
def overload_call(client: PromptsClient) -> PromptsClient:
7170
client._call = client.call
7271

7372
def _overload_call(self, **kwargs) -> PromptCallResponse:

0 commit comments

Comments (0)