2222from humanloop.otel import instrument_provider
2323from humanloop.otel.exporter import HumanloopSpanExporter
2424from humanloop.otel.processor import HumanloopSpanProcessor
25- from humanloop.prompt_utils import populate_template
26- from humanloop.prompts.client import PromptsClient
2725
2826
2927class ExtendedEvalsClient(EvaluationsClient):
@@ -70,14 +68,6 @@ def run(
7068 )
7169
7270
73- class ExtendedPromptsClient(PromptsClient):
74- """
75- Adds utility for populating Prompt template inputs.
76- """
77-
78- populate_template = staticmethod(populate_template)  # type: ignore [assignment]
79-
80-
8171class Humanloop(BaseHumanloop):
8272 """
8373 See docstring of :class:`BaseHumanloop`.
@@ -119,7 +109,6 @@ def __init__(
119109 eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
120110 eval_client.client = self
121111 self.evaluations = eval_client
122- self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
123112
124113 # Overload the .log method of the clients to be aware of Evaluation Context
125114 # and the @flow decorator providing the trace_id
@@ -144,7 +133,9 @@ def __init__(
144133 )
145134
146135 if opentelemetry_tracer is None:
147- self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk")
136+ self._opentelemetry_tracer = self._tracer_provider.get_tracer(
137+ "humanloop.sdk"
138+ )
148139 else:
149140 self._opentelemetry_tracer = opentelemetry_tracer
150141
@@ -153,7 +144,7 @@ def prompt(
153144 *,
154145 path: str,
155146 ):
156- """Auto-instrument LLM provider and create [Prompt](https://humanloop.com/docs/explanation/prompts)
147+ """Auto-instrument LLM providers and create [Prompt](https://humanloop.com/docs/explanation/prompts)
157148 Logs on Humanloop from them.
158149
159150 ```python
0 commit comments