
Commit 49291d0

Author: Andrei Bratu
Commit message: Harry PR feedback
1 parent: c887f6e

File tree

6 files changed: +63 additions, −12 deletions


src/humanloop/decorators/flow.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 import logging
-from functools import wraps
 import os
 import sys
+from functools import wraps
 from typing import Any, Callable, Mapping, Optional, Sequence
 
 from opentelemetry.sdk.trace import Span

src/humanloop/decorators/prompt.py

Lines changed: 0 additions & 1 deletion
@@ -6,7 +6,6 @@
 from opentelemetry.sdk.trace import Span
 from opentelemetry.trace import Tracer
 
-
 if typing.TYPE_CHECKING:
     from humanloop import ToolFunctionParams
 from humanloop.decorators.helpers import args_to_inputs

src/humanloop/eval_utils/context.py

Lines changed: 0 additions & 1 deletion
@@ -1,4 +1,3 @@
-from contextvars import ContextVar, Token
 from typing import Callable, TypedDict
 
 
src/humanloop/eval_utils/run.py

Lines changed: 26 additions & 2 deletions
@@ -8,7 +8,6 @@
 not be called directly.
 """
 
-from contextvars import ContextVar
 import copy
 import inspect
 import json
@@ -19,6 +18,7 @@
 import types
 import typing
 from concurrent.futures import ThreadPoolExecutor
+from contextvars import ContextVar
 from datetime import datetime
 from functools import partial
 from logging import INFO
@@ -144,6 +144,12 @@ def _overloaded_log(
             kwargs[attribute] = evaluation_context[attribute]  # type: ignore
 
         # Call the original .log method
+        logger.debug(
+            "Logging %s inside _overloaded_log on Thread %s",
+            kwargs,
+            evaluation_context,
+            threading.get_ident(),
+        )
         response = self._log(**kwargs)
 
         # Call the callback so the Evaluation can be updated
@@ -174,6 +180,7 @@ def _overloaded_log(
     # Replace the original log method with the overloaded one
    client.log = types.MethodType(_overloaded_log, client)  # type: ignore
    # Return the client with the overloaded log method
+    logger.debug("Overloaded the .log method of %s", client)
    return client
 
 
@@ -246,7 +253,7 @@ def run_eval(
     global _PROGRESS_BAR
 
     if hasattr(file["callable"], "file"):
-        # When the decorator inside `file`` is a decorated function,
+        # When the decorator inside `file` is a decorated function,
         # we need to validate that the other parameters of `file`
         # match the attributes of the decorator
         inner_file: File = file["callable"].file
@@ -429,6 +436,12 @@ def run_eval(
     def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
         def upload_callback(log: dict):
             """Logic ran after the Log has been created."""
+            logger.debug(
+                "upload_callback on Thread %s: log %s datapoint_target %s",
+                threading.get_ident(),
+                log,
+                dp.target,
+            )
             _run_local_evaluators(
                 client=client,
                 log=log,
@@ -448,6 +461,12 @@ def upload_callback(log: dict):
                 path=file_path,
             )
         )
+        logger.debug(
+            "process_datapoint on Thread %s: evaluating Datapoint %s with EvaluationContext %s",
+            threading.get_ident(),
+            datapoint_dict,
+            evaluation_context_variable.get(),
+        )
         log_func = _get_log_func(
             client=client,
             file_type=type_,
@@ -482,6 +501,11 @@ def upload_callback(log: dict):
         if context_variable is not None:
             # Evaluation Context has not been consumed
             # function_ is a plain callable so we need to create a Log
+            logger.debug(
+                "process_datapoint on Thread %s: function_ %s is a simple callable, context was not consumed",
+                threading.get_ident(),
+                function_.__name__,
+            )
             log_func(
                 inputs=datapoint.inputs,
                 output=output,
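
For context on the `client.log = types.MethodType(_overloaded_log, client)` line above: rebinding a bound method at runtime is what lets the SDK wrap every `.log` call with extra bookkeeping and the thread-tagged debug output added in this commit. Below is a minimal, self-contained sketch of that pattern, assuming a simplified stand-in `Client` class and an illustrative `overload_log` helper (the real SDK client and its `.log` signature differ):

import logging
import threading
import types

logger = logging.getLogger(__name__)


class Client:
    """Simplified stand-in for the SDK client; only .log matters here."""

    def log(self, **kwargs) -> dict:
        return {"id": "log_123", **kwargs}


def overload_log(client: Client) -> Client:
    # Keep a reference to the original bound method so the wrapper can call it.
    client._log = client.log

    def _overloaded_log(self, **kwargs) -> dict:
        # Thread-tagged debug output, mirroring the logger.debug calls added in this commit.
        logger.debug("Logging %s on Thread %s", kwargs, threading.get_ident())
        return self._log(**kwargs)

    # Rebind .log on this instance so every call routes through the wrapper.
    client.log = types.MethodType(_overloaded_log, client)
    return client


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    print(overload_log(Client()).log(inputs={"question": "hi"}))

Enabling DEBUG logging, as in the `basicConfig` call above, is also how the new `logger.debug` lines in this commit become visible when running evaluations.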

src/humanloop/otel/exporter.py

Lines changed: 35 additions & 6 deletions
@@ -1,6 +1,7 @@
-import copy
+import contextvars
 import json
 import logging
+import threading
 import typing
 from queue import Empty as EmptyQueue
 from queue import Queue
@@ -62,6 +63,7 @@ def __init__(
         self._shutdown: bool = False
         for thread in self._threads:
             thread.start()
+            logger.debug("Exporter Thread %s started", thread.ident)
 
     def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
         def is_evaluated_file(
@@ -89,15 +91,25 @@ def is_evaluated_file(
         self._upload_queue.put(
             (
                 span,
-                copy.deepcopy(evaluation_context),
-            )
+                contextvars.copy_context()[self._client.evaluation_context_variable],
+            ),
+        )
+        logger.debug(
+            "Span %s with EvaluationContext %s added to upload queue",
+            span.attributes,
+            contextvars.copy_context()[self._client.evaluation_context_variable],
         )
         # Reset the EvaluationContext so run eval does not
         # create a duplicate Log
         if evaluation_context is not None and is_evaluated_file(
             spans[0],
             evaluation_context,
         ):
+            logger.debug(
+                "EvaluationContext %s marked as exhausted for Log in Span %s",
+                evaluation_context,
+                spans[0].attributes,
+            )
             # Mark the EvaluationContext as used
             self._client.evaluation_context_variable.set(None)
         return SpanExportResult.SUCCESS
@@ -109,6 +121,7 @@ def shutdown(self) -> None:
         self._shutdown = True
         for thread in self._threads:
             thread.join()
+            logger.debug("Exporter Thread %s joined", thread.ident)
 
     def force_flush(self, timeout_millis: int = 3000) -> bool:
         self._shutdown = True
@@ -146,21 +159,37 @@ def _do_work(self):
                 # Set the EvaluationContext for the thread so the .log action works as expected
                 # NOTE: Expecting the evaluation thread to send a single span so we are
                 # not resetting the EvaluationContext in the scope of the export thread
-                self._client.evaluation_context_variable.set(
-                    copy.deepcopy(evaluation_context),
-                )
+                self._client.evaluation_context_variable.set(evaluation_context)
             except EmptyQueue:
                 continue
             trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
             if trace_metadata is None:
                 # Span is not part of a Flow Log
                 self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "_do_work on Thread %s: Dispatched span %s with FlowContext %s which is not part of a Flow",
+                    threading.get_ident(),
+                    span_to_export.attributes,
+                    trace_metadata,
+                )
             elif trace_metadata["trace_parent_id"] is None:
                 # Span is the head of a Flow Trace
                 self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "Dispatched span %s which is a Flow Log with FlowContext %s",
+                    span_to_export.attributes,
+                    trace_metadata,
+                )
             elif trace_metadata["trace_parent_id"] in self._span_id_to_uploaded_log_id:
                 # Span is part of a Flow and its parent has been uploaded
                 self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "_do_work on Thread %s: Dispatched span %s after its parent %s with FlowContext %s",
+                    threading.get_ident(),
+                    span_to_export.attributes,
+                    trace_metadata["trace_parent_id"],
+                    trace_metadata,
+                )
             else:
                 # Requeue the Span to be uploaded later
                 self._upload_queue.put((span_to_export, evaluation_context))
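
The switch from `copy.deepcopy(evaluation_context)` to `contextvars.copy_context()[self._client.evaluation_context_variable]` snapshots the `ContextVar` as the exporting thread sees it and hands that value to the upload worker through the queue. Below is a minimal sketch of that hand-off, assuming a hypothetical `EvaluationContext` TypedDict and a module-level `ContextVar` (the SDK keeps the variable on the client instead):

import contextvars
import queue
import threading
from typing import Optional, TypedDict


class EvaluationContext(TypedDict):
    source_datapoint_id: str
    run_id: str


# Hypothetical stand-in for client.evaluation_context_variable.
evaluation_context_variable: contextvars.ContextVar[Optional[EvaluationContext]] = contextvars.ContextVar(
    "evaluation_context", default=None
)

upload_queue: "queue.Queue[Optional[EvaluationContext]]" = queue.Queue()


def producer() -> None:
    # Set the ContextVar in the producing thread, then snapshot it.
    evaluation_context_variable.set({"source_datapoint_id": "dp_1", "run_id": "run_1"})
    # copy_context() captures the current ContextVar values; indexing it by the
    # variable reads the snapshot without touching any other thread's context.
    snapshot = contextvars.copy_context()[evaluation_context_variable]
    upload_queue.put(snapshot)


def worker() -> None:
    # Each thread has its own context, so the worker adopts the queued value explicitly.
    evaluation_context_variable.set(upload_queue.get())
    print("worker sees:", evaluation_context_variable.get())


producer_thread = threading.Thread(target=producer)
worker_thread = threading.Thread(target=worker)
producer_thread.start()
producer_thread.join()
worker_thread.start()
worker_thread.join()

Note that indexing a copied context with a variable that was never set raises KeyError, so the sketch sets the variable before snapshotting; the exporter's guard on `evaluation_context` plays a similar role.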

src/humanloop/otel/helpers.py

Lines changed: 1 addition & 1 deletion
@@ -196,7 +196,7 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDict
             sub_result[part] = span_value
         else:
             if part not in sub_result:
-                # New dict since
+                # Create new dict for a previously unseen part of the key
                 sub_result[part] = {}
             sub_result = sub_result[part]  # type: ignore
 
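
The comment fixed above belongs to the loop in `read_from_opentelemetry_span` that rebuilds nested dictionaries from dotted span-attribute keys. A standalone sketch of that unflattening step, using a hypothetical `unflatten` helper rather than the SDK function itself:

from typing import Any, Dict


def unflatten(attributes: Dict[str, Any]) -> Dict[str, Any]:
    """Rebuild a nested dict from dotted keys, e.g. {"a.b": 1} -> {"a": {"b": 1}}."""
    result: Dict[str, Any] = {}
    for key, value in attributes.items():
        parts = key.split(".")
        sub_result = result
        for part in parts[:-1]:
            if part not in sub_result:
                # Create new dict for a previously unseen part of the key
                sub_result[part] = {}
            sub_result = sub_result[part]
        # The final part holds the actual attribute value.
        sub_result[parts[-1]] = value
    return result


print(unflatten({"prompt.model": "gpt-4o", "prompt.temperature": 0.7, "output": "hi"}))
# {'prompt': {'model': 'gpt-4o', 'temperature': 0.7}, 'output': 'hi'}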