
Commit f58f037

Author: Andrei Bratu

Commit message: type checks
1 parent 7ea0269 commit f58f037

File tree

11 files changed: +1537, -1142 lines


poetry.lock

Lines changed: 1441 additions & 1065 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 7 additions & 6 deletions
@@ -1,3 +1,6 @@
+[project]
+name = "humanloop"
+
 [tool.poetry]
 name = "humanloop"
 version = "0.8.24"
@@ -27,12 +30,7 @@ packages = [
     { include = "humanloop", from = "src"}
 ]
 
-[tool.poetry.group.dev.dependencies]
-python-dotenv = "^1.0.1"
 
-chromadb = "^0.6.3"
-pandas = "^2.2.3"
-pyarrow = "^19.0.0"
 [project.urls]
 Repository = 'https://github.com/humanloop/humanloop-python'
 
@@ -70,6 +68,9 @@ python-dotenv = "^1.0.1"
 replicate = "^1.0.3"
 ruff = "^0.5.6"
 types-jsonschema = "^4.23.0.20240813"
+chromadb="^0.3.8"
+pandas = "^2.2.3"
+pyarrow = "^19.0.0"
 
 [tool.pytest.ini_options]
 testpaths = [ "tests" ]
@@ -84,4 +85,4 @@ line-length = 120
 
 [build-system]
 requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+build-backend = "poetry.core.masonry.api"

src/humanloop/otel/exporter.py

Lines changed: 11 additions & 12 deletions
@@ -5,7 +5,7 @@
 from queue import Empty as EmptyQueue
 from queue import Queue
 from threading import Thread
-from typing import Any, Optional
+from typing import Any, Optional, Sequence
 
 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
@@ -94,7 +94,7 @@ def __init__(
         # Flow Log Span ID mapping to children Spans that must be uploaded first
         self._spans_left_in_trace: dict[int, set[int]] = {}
 
-    def export(self, spans: list[ReadableSpan]) -> SpanExportResult:
+    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        if self._shutdown:
            logger.warning("[HumanloopSpanExporter] Shutting down, not accepting new spans")
            return SpanExportResult.FAILURE
@@ -205,15 +205,14 @@ def _export_span_dispatch(self, span: ReadableSpan) -> None:
            file_type,
        )
 
-        match file_type:
-            case "prompt":
-                self._export_prompt_span(span=span)
-            case "tool":
-                self._export_tool_span(span=span)
-            case "flow":
-                self._export_flow_span(span=span)
-            case _:
-                raise NotImplementedError(f"Unknown span type: {hl_file}")
+        if file_type == "prompt":
+            self._export_prompt_span(span=span)
+        if file_type == "tool":
+            self._export_tool_span(span=span)
+        if file_type == "flow":
+            self._export_flow_span(span=span)
+        else:
+            raise NotImplementedError(f"Unknown span type: {hl_file}")
 
    def _export_prompt_span(self, span: ReadableSpan) -> None:
        file_object: dict[str, Any] = read_from_opentelemetry_span(
@@ -305,7 +304,7 @@ def _export_flow_span(self, span: ReadableSpan) -> None:
        # passed by the Processor. Each uploaded child in the trace
        # will check if it's the last one and mark the Flow Log as complete
        try:
-            prerequisites: list[int] = read_from_opentelemetry_span(
+            prerequisites: list[int] = read_from_opentelemetry_span(  # type: ignore
                span=span,
                key=HUMANLOOP_FLOW_PREREQUISITES_KEY,
            )
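
Note on the converted dispatch above: with independent "if" statements the trailing "else" pairs only with the "flow" check, so a prompt or tool span would still reach the raise after being exported. A minimal standalone sketch of an elif chain that keeps the original match semantics; export_span is a hypothetical stand-in, not the SDK's exporter method:

def export_span(file_type: str, span_id: int) -> str:
    # Exactly one branch runs; unknown file types raise, mirroring the
    # behaviour of the original match statement.
    if file_type == "prompt":
        return f"exported prompt span {span_id}"
    elif file_type == "tool":
        return f"exported tool span {span_id}"
    elif file_type == "flow":
        return f"exported flow span {span_id}"
    else:
        raise NotImplementedError(f"Unknown span type: {file_type}")

print(export_span("tool", 42))  # -> exported tool span 42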

src/humanloop/otel/helpers.py

Lines changed: 1 addition & 1 deletion
@@ -244,7 +244,7 @@ def pseudo_to_list(sub_dict):
 
 def is_llm_provider_call(span: ReadableSpan) -> bool:
     """Determines if the span was created by an Instrumentor for LLM provider clients."""
-    if not hasattr(span, "instrumentation_scope"):
+    if not hasattr(span, "instrumentation_scope") or span.instrumentation_scope is None:
         return False
     span_instrumentor_name = span.instrumentation_scope.name
     # Match against the prefix of the Instrumentor name since

src/humanloop/otel/processor/__init__.py

Lines changed: 38 additions & 29 deletions
@@ -1,6 +1,7 @@
 from dataclasses import dataclass
 import logging
 from collections import defaultdict
+from typing import Optional
 
 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
@@ -81,7 +82,10 @@ def on_end(self, span: ReadableSpan) -> None:
         span_id = span.context.span_id
         if is_humanloop_span(span=span):
             if not self._must_wait(span):
-                self._send_to_exporter(span, self._dependencies[span.context.span_id])
+                self._send_to_exporter(
+                    span=span,
+                    dependencies=[dependency.span for dependency in self._dependencies[span.context.span_id]],
+                )
             else:
                 # Must wait for dependencies
                 self._waiting[span_id] = span
@@ -93,7 +97,10 @@ def on_end(self, span: ReadableSpan) -> None:
 
             waiting_span = self._get_waiting_parent(span)
             if waiting_span is not None:
-                self._send_to_exporter(span, self._dependencies[span])
+                self._send_to_exporter(
+                    span=span,
+                    dependencies=[dependency.span for dependency in self._dependencies[span.context.span_id]],
+                )
             return
 
         # Be unopinionated and pass all other spans to Exporter
@@ -106,18 +113,20 @@ def _must_wait(self, span: ReadableSpan) -> bool:
                 return False
         return True
 
-    def _get_waiting_parent(self, span: ReadableSpan) -> ReadableSpan | None:
-        parent_span_id = span.parent.span_id
+    def _get_waiting_parent(self, span: ReadableSpan) -> Optional[ReadableSpan]:
+        # We know this span has a parent, need to satisfy the type checker
+        parent_span_id = span.parent.span_id  # type: ignore
         if parent_span_id in self._waiting:
             if all([dependency.finished for dependency in self._dependencies[parent_span_id]]):
                 waiting_span = self._waiting[parent_span_id]
                 del self._dependencies[parent_span_id]
-                del waiting_span[parent_span_id]
+                del self._waiting[parent_span_id]
                 return waiting_span
         return None
 
     def _add_dependency_to_await(self, span: ReadableSpan):
-        parent_span_id = span.parent.span_id if span.parent else None
+        # We know this span has a parent, need to satisfy the type checker
+        parent_span_id = span.parent.span_id  # type: ignore
         if self._is_dependency(span):
             self._dependencies[parent_span_id].append(DependantSpan(span=span, finished=False))
 
@@ -138,7 +147,8 @@ def _track_flow_traces(self, span: ReadableSpan):
 
     def _mark_dependency_arrival(self, span: ReadableSpan):
         span_id = span.context.span_id
-        parent_span_id = span.parent.span_id
+        # We know this span has a parent, need to satisfy type checker
+        parent_span_id = span.parent.span_id  # type: ignore
         self._dependencies[parent_span_id] = [
             dependency if dependency.span.context.span_id != span_id else DependantSpan(span=span, finished=True)
             for dependency in self._dependencies[parent_span_id]
@@ -160,28 +170,27 @@ def _send_to_exporter(
         # Processing specific to each Humanloop File type
         file_type = span.attributes[HUMANLOOP_FILE_TYPE_KEY]  # type: ignore
         span_id = span.context.span_id
-        match file_type:
-            case "prompt":
-                enhance_prompt_span(
-                    prompt_span=span,
-                    dependencies=[dependency.span for dependency in dependencies],
-                )
-            case "tool":
-                # No extra processing needed
-                pass
-            case "flow":
-                trace = self._spans_to_complete_flow_trace.get(span_id, [])
-                write_to_opentelemetry_span(
-                    span=span,
-                    key=HUMANLOOP_FLOW_PREREQUISITES_KEY,
-                    value=trace,
-                )
-            case _:
-                logger.error(
-                    "[HumanloopSpanProcessor] Unknown Humanloop File span %s %s",
-                    span_id,
-                    span.name,
-                )
+        if file_type == "prompt":
+            enhance_prompt_span(
+                prompt_span=span,
+                dependencies=dependencies,
+            )
+        elif file_type == "tool":
+            # No extra processing needed
+            pass
+        elif file_type == "flow":
+            trace = self._spans_to_complete_flow_trace.get(span_id, [])
+            write_to_opentelemetry_span(
+                span=span,
+                key=HUMANLOOP_FLOW_PREREQUISITES_KEY,
+                value=trace,
+            )
+        else:
+            logger.error(
+                "[HumanloopSpanProcessor] Unknown Humanloop File span %s %s",
+                span_id,
+                span.name,
+            )
 
         self.span_exporter.export([span])
 
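
The recurring fix in this file is that _dependencies stores DependantSpan wrappers while the exporter-facing calls expect bare spans, so the wrappers are unwrapped with a list comprehension at the call sites. A minimal standalone sketch of that pattern, using a toy str in place of an OpenTelemetry ReadableSpan:

from dataclasses import dataclass

@dataclass
class DependantSpan:
    span: str  # stands in for a ReadableSpan
    finished: bool

dependencies = [
    DependantSpan(span="llm-call", finished=True),
    DependantSpan(span="retrieval", finished=True),
]

# Only hand over the wrapped spans once every dependency has finished.
if all(dep.finished for dep in dependencies):
    spans_only = [dep.span for dep in dependencies]
    print(spans_only)  # -> ['llm-call', 'retrieval']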

src/humanloop/otel/processor/prompts.py

Lines changed: 4 additions & 1 deletion
@@ -1,3 +1,4 @@
+import logging
 from typing import Any
 from opentelemetry.sdk.trace import ReadableSpan
 from pydantic import ValidationError as PydanticValidationError
@@ -10,6 +11,8 @@
 )
 from humanloop.types.prompt_kernel_request import PromptKernelRequest
 
+logger = logging.getLogger("humanloop.sdk")
+
 
 def enhance_prompt_span(prompt_span: ReadableSpan, dependencies: list[ReadableSpan]):
     """Add information from the LLM provider span to the Prompt span.
@@ -57,7 +60,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
 
     try:
         # Validate the Prompt Kernel
-        PromptKernelRequest.model_validate(obj=prompt)
+        PromptKernelRequest.model_validate(obj=prompt)  # type: ignore
     except PydanticValidationError as e:
         logger.error(
             "[HumanloopSpanProcessor] Could not validate Prompt Kernel extracted from span: %s %s. Error: %s",

tests/integration/chat_agent/conftest.py

Lines changed: 4 additions & 4 deletions
@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Callable
+from typing import Any, Callable
 
 import pytest
 
@@ -44,7 +44,7 @@ def surfer_agent_scenario(
     client = OpenAI(api_key=api_keys.openai)
 
     @humanloop_client.tool(path=get_test_path("Calculator"))
-    def calculator(operation: str, num1: int, num2: int) -> str:
+    def calculator(operation: str, num1: int, num2: int) -> float:
         """Do arithmetic operations on two numbers."""
         if operation == "add":
             return num1 + num2
@@ -70,8 +70,8 @@ def pick_random_number():
             calculator.json_schema,
         ],
     )
-    def call_agent(messages: list[str]) -> str:
-        output = client.chat.completions.create(
+    def call_agent(messages: list[dict[str, Any]]) -> str:  # type: ignore [call-arg]
+        output = client.chat.completions.create(  # type: ignore [call-overload]
            model="gpt-4o-mini",
            messages=messages,
            # Use .json_schema property on decorated functions to easily access
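
The calculator tool's return annotation changes from str to float to match what the arithmetic branches actually return. A standalone sketch of the corrected signature; only the "add" branch is visible in the diff, the remaining operations are assumptions for illustration:

def calculator(operation: str, num1: int, num2: int) -> float:
    """Do arithmetic operations on two numbers."""
    if operation == "add":
        return num1 + num2
    if operation == "subtract":
        return num1 - num2
    if operation == "multiply":
        return num1 * num2
    if operation == "divide":
        return num1 / num2
    raise ValueError(f"Unknown operation: {operation}")

print(calculator("add", 2, 3))  # -> 5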

tests/integration/chat_agent/test_chat_agent.py

Lines changed: 5 additions & 3 deletions
@@ -8,7 +8,7 @@
 import time
 from typing import ContextManager, TextIO
 from unittest.mock import MagicMock, patch
-from src.humanloop import Humanloop
+from humanloop import Humanloop
 from tests.integration.chat_agent.conftest import SurferAgentScenario
 from tests.integration.conftest import DirectoryIdentifiers
 
@@ -27,7 +27,7 @@ def test_scenario_runs(
         "exit",
     ]
     mocked_input.side_effect = scenario_io
-    with capture_stdout() as console_output:
+    with capture_stdout() as console_output:  # type: ignore [operator]
         surfer_agent_scenario.agent_chat_workflow()
 
     time.sleep(5)
@@ -46,7 +46,9 @@ def test_scenario_runs(
     # List will not pass the children to the trace_children attribute
     assert len(flow_log.trace_children) == 0
     response = humanloop_client.logs.get(flow_log.id)
-    assert response["trace_status"] == "complete"
+    if not isinstance(response, dict):
+        response = response.dict()
+    assert response["trace_status"] == "complete"  # type: ignore [attr-defined]
     assert len(response["trace_children"]) == 2
     messages = response["trace_children"][1]["messages"]
     assert len(messages) == 4
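
The assertion block now normalises the logs response before keyed access, since the client may return either a plain dict or a Pydantic model. A toy illustration of the same pattern; LogResponse is a hypothetical model, not the SDK's response type:

from pydantic import BaseModel

class LogResponse(BaseModel):
    trace_status: str
    trace_children: list = []

response = LogResponse(trace_status="complete")
if not isinstance(response, dict):
    response = response.dict()  # Pydantic v1-style API, as used in the test
assert response["trace_status"] == "complete"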

tests/integration/conftest.py

Lines changed: 19 additions & 14 deletions
@@ -10,7 +10,7 @@
 from typing import Generator, TextIO
 import pytest
 from dotenv import load_dotenv
-from src.humanloop import Humanloop
+from humanloop import Humanloop
 
 
 @dataclass
@@ -49,9 +49,13 @@ def root_integration_directory(humanloop_client: Humanloop) -> Generator[str, No
 
 @pytest.fixture(scope="session")
 def api_keys() -> APIKeys:
+    openai_key = os.getenv("OPENAI_API_KEY")
+    humanloop_key = os.getenv("HUMANLOOP_API_KEY")
+    if openai_key is None or humanloop_key is None:
+        raise ValueError("API keys are not set in .env file")
     api_keys = APIKeys(
-        openai=os.getenv("OPENAI_API_KEY"),
-        humanloop=os.getenv("HUMANLOOP_API_KEY"),
+        openai=openai_key,
+        humanloop=humanloop_key,
     )
     for key, value in asdict(api_keys).items():
         if value is None:
@@ -68,17 +72,18 @@ def _directory_cleanup(directory_id: str, humanloop_client: Humanloop):
     response = humanloop_client.directories.get(directory_id)
     for file in response.files:
         file_id = file.id
-        match file.type:
-            case "prompt":
-                client = humanloop_client.prompts
-            case "tool":
-                client = humanloop_client.tools
-            case "dataset":
-                client = humanloop_client.datasets
-            case "evaluator":
-                client = humanloop_client.evaluators
-            case "flow":
-                client = humanloop_client.flows
+        if file.type == "prompt":
+            client = humanloop_client.prompts  # type: ignore [assignment]
+        elif file.type == "tool":
+            client = humanloop_client.tools  # type: ignore [assignment]
+        elif file.type == "dataset":
+            client = humanloop_client.datasets  # type: ignore [assignment]
+        elif file.type == "evaluator":
+            client = humanloop_client.evaluators  # type: ignore [assignment]
+        elif file.type == "flow":
+            client = humanloop_client.flows  # type: ignore [assignment]
+        else:
+            raise NotImplementedError(f"Unknown HL file type {file.type}")
         client.delete(file_id)
 
     for subdirectory in response.subdirectories:
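
The api_keys fixture now reads both environment variables up front and fails fast if either is missing, which also narrows the values from Optional[str] to str for the type checker. A minimal standalone sketch of the same pattern; load_api_keys is a hypothetical name, not the fixture itself:

import os
from dataclasses import dataclass

@dataclass
class APIKeys:
    openai: str
    humanloop: str

def load_api_keys() -> APIKeys:
    openai_key = os.getenv("OPENAI_API_KEY")
    humanloop_key = os.getenv("HUMANLOOP_API_KEY")
    if openai_key is None or humanloop_key is None:
        raise ValueError("API keys are not set in .env file")
    # Both values are plain str from here on, so APIKeys(str, str) type-checks.
    return APIKeys(openai=openai_key, humanloop=humanloop_key)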

tests/integration/evaluate_medqa/conftest.py

Lines changed: 3 additions & 3 deletions
@@ -47,17 +47,17 @@ class MedQAScenario:
 
 @pytest.fixture()
 def evaluate_medqa_scenario(
-    humanloop_client: Humanloop,
+    humanloop_client: "Humanloop",
     get_test_path: Callable[[str], str],
     api_keys: APIKeys,
     medqa_knowledge_base_path: str,
     medqa_dataset_path: str,
 ) -> MedQAScenario:
     import inspect
 
-    from chromadb import chromadb
+    from chromadb import chromadb  # type: ignore
     from openai import OpenAI
-    import pandas as pd
+    import pandas as pd  # type: ignore
 
     chroma = chromadb.Client()
     collection = chroma.get_or_create_collection(name="MedQA")
