@@ -391,7 +391,7 @@ def _get_langfuse_data_from_kwargs(resource: OpenAiDefinition, kwargs: Any) -> A

     if resource.type == "completion":
         prompt = kwargs.get("prompt", None)
-    elif resource.object == "Responses":
+    elif resource.object == "Responses" or resource.object == "AsyncResponses":
         prompt = kwargs.get("input", None)
     elif resource.type == "chat":
         prompt = _extract_chat_prompt(kwargs)
@@ -672,7 +672,7 @@ def _get_langfuse_data_from_default_response(

        completion = choice.text if _is_openai_v1() else choice.get("text", None)

-    elif resource.object == "Responses":
+    elif resource.object == "Responses" or resource.object == "AsyncResponses":
        output = response.get("output", {})

        if not isinstance(output, list):
@@ -922,6 +922,7 @@ def _finalize(self) -> None:

        model, completion, usage, metadata = (
            _extract_streamed_response_api_response(self.items)
            if self.resource.object == "Responses"
+            or self.resource.object == "AsyncResponses"
            else _extract_streamed_openai_response(self.resource, self.items)
        )
@@ -992,6 +993,7 @@ async def _finalize(self) -> None:

        model, completion, usage, metadata = (
            _extract_streamed_response_api_response(self.items)
            if self.resource.object == "Responses"
+            or self.resource.object == "AsyncResponses"
            else _extract_streamed_openai_response(self.resource, self.items)
        )
0 commit comments