|
67 | 67 | ) |
68 | 68 |
|
69 | 69 | LANGCHAIN_VERSION = package_version("langchain") |
| 70 | +LANGCHAIN_OPENAI_VERSION = package_version("langchain-openai") |
70 | 71 |
|
71 | 72 |
|
72 | 73 | @tool |
@@ -170,6 +171,68 @@ def test_langchain_text_completion( |
170 | 171 | assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15 |
171 | 172 |
|
172 | 173 |
|
def test_langchain_chat_with_run_name(
    sentry_init,
    capture_events,
    get_model_response,
    nonstreaming_chat_completions_model_response,
):
    """A ``run_name`` passed through ``invoke``'s config should surface as
    the ``gen_ai.chat`` span's function id."""
    sentry_init(
        integrations=[LangchainIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    # Header behavior changed in
    # https://github.com/langchain-ai/langchain/pull/32655
    request_headers = (
        {"X-Stainless-Raw-Response": "True"}
        if LANGCHAIN_OPENAI_VERSION >= (0, 3, 32)
        else {}
    )

    # Canned OpenAI chat-completions payload served by the patched transport.
    usage = CompletionUsage(
        prompt_tokens=20,
        completion_tokens=10,
        total_tokens=30,
    )
    fake_response = get_model_response(
        nonstreaming_chat_completions_model_response(
            response_id="chat-id",
            response_model="response-model-id",
            message_content="the model response",
            created=10000000,
            usage=usage,
        ),
        serialize_pydantic=True,
        request_headers=request_headers,
    )

    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key="badkey",
    )

    # Patch the lowest-level HTTP client so no real request is made.
    with patch.object(
        llm.client._client._client,
        "send",
        return_value=fake_response,
    ), start_transaction():
        llm.invoke(
            "How many letters in the word eudca",
            config={"run_name": "my-snazzy-pipeline"},
        )

    transaction = events[0]

    chat_spans = [
        span for span in transaction["spans"] if span["op"] == "gen_ai.chat"
    ]
    assert len(chat_spans) == 1
    assert chat_spans[0]["data"][SPANDATA.GEN_AI_FUNCTION_ID] == "my-snazzy-pipeline"
| 234 | + |
| 235 | + |
173 | 236 | @pytest.mark.skipif( |
174 | 237 | LANGCHAIN_VERSION < (1,), |
175 | 238 | reason="LangChain 1.0+ required (ONE AGENT refactor)", |
|
0 commit comments