@@ -138,6 +138,8 @@ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -
138138 return _call_llm_base
139139
140140
141+ # Providers are flaky, so we retry the test
142+ @pytest.mark.flaky(retries=3, delay=60)
141143@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
142144def test_prompt_decorator(
143145    provider_model: tuple[str, str],
@@ -167,6 +169,8 @@ def test_prompt_decorator(
167169    assert spans[1].attributes.get("prompt") is None  # type: ignore
168170
169171
172+ # Providers are flaky, so we retry the test
173+ @pytest.mark.flaky(retries=3, delay=60)
170174@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
171175def test_prompt_decorator_with_hl_processor(
172176    provider_model: tuple[str, str],
@@ -207,6 +211,8 @@ def test_prompt_decorator_with_hl_processor(
207211    assert prompt_kernel.top_p is None
208212
209213
214+ # Providers are flaky, so we retry the test
215+ @pytest.mark.flaky(retries=3, delay=60)
210216@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
211217def test_prompt_decorator_with_defaults(
212218    provider_model: tuple[str, str],
@@ -244,6 +250,8 @@ def test_prompt_decorator_with_defaults(
244250    assert prompt.model == model
245251
246252
253+ # Providers are flaky, so we retry the test
254+ @pytest.mark.flaky(retries=3, delay=60)
247255@pytest.mark.parametrize(
248256    "attributes_test_expected",
249257    [
0 commit comments