Skip to content

Commit ab56cdc

Browse files
committed
chore: remove unnecessary deps
1 parent 7cbca81 commit ab56cdc

6 files changed

Lines changed: 1679 additions & 5441 deletions

File tree

poetry.lock

Lines changed: 1679 additions & 5083 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -23,37 +23,14 @@ opentelemetry-exporter-otlp = "^1.33.1"
2323

2424
[tool.poetry.group.dev.dependencies]
2525
pytest = ">=7.4,<9.0"
26-
chromadb = ">=0.4.2,<0.6.0"
27-
tiktoken = "0.7.0"
2826
pytest-timeout = "^2.1.0"
2927
pytest-xdist = "^3.3.1"
30-
respx = ">=0.20.2,<0.22.0"
31-
google-search-results = "^2.4.2"
32-
huggingface_hub = ">=0.16.4,<0.25.0"
3328
pre-commit = "^3.2.2"
34-
anthropic = ">=0.17.0,<1"
35-
bs4 = ">=0.0.1,<0.0.3"
36-
lark = "^1.1.7"
3729
pytest-asyncio = ">=0.21.1,<0.24.0"
3830
pytest-httpserver = "^1.0.8"
39-
boto3 = "^1.28.59"
4031
ruff = ">=0.1.8,<0.6.0"
4132
mypy = "^1.0.0"
42-
langchain-mistralai = ">=0.0.1,<0.3"
43-
google-cloud-aiplatform = "^1.38.1"
44-
cohere = ">=4.46,<6.0"
45-
langchain-google-vertexai = ">=1.0.0,<3.0.0"
4633
langchain-openai = ">=0.0.5,<0.3"
47-
dashscope = "^1.14.1"
48-
pymongo = "^4.6.1"
49-
llama-index-llms-anthropic = ">=0.1.1,<0.6"
50-
bson = "^0.5.10"
51-
langchain-anthropic = ">=0.1.4,<0.4"
52-
langchain-groq = ">=0.1.3,<0.3"
53-
langchain-aws = ">=0.1.3,<0.3"
54-
langchain-ollama = "^0.2.0"
55-
langchain-cohere = "^0.3.3"
56-
langchain-community = ">=0.2.14,<0.4"
5734
langgraph = "^0.2.62"
5835

5936
[tool.poetry.group.docs.dependencies]
@@ -62,7 +39,6 @@ pdoc = "^14.4.0"
6239
[tool.poetry.extras]
6340
openai = ["openai"]
6441
langchain = ["langchain"]
65-
llama-index = ["llama-index"]
6642

6743
[build-system]
6844
requires = ["poetry-core>=1.0.0"]

tests/test_extract_model.py

Lines changed: 0 additions & 158 deletions
This file was deleted.

tests/test_json.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
from unittest.mock import patch
88

99
import pytest
10-
from bson import ObjectId
1110
from langchain.schema.messages import HumanMessage
1211
from pydantic import BaseModel
1312

@@ -129,11 +128,3 @@ def test_observation_level():
129128
result = json.dumps(ObservationLevel.ERROR, cls=EventSerializer)
130129

131130
assert result == '"ERROR"'
132-
133-
134-
def test_mongo_cursor():
135-
test_id = ObjectId("5f3e3e3e3e3e3e3e3e3e3e3e")
136-
137-
result = json.dumps(test_id, cls=EventSerializer)
138-
139-
assert isinstance(result, str)

tests/test_langchain.py

Lines changed: 0 additions & 138 deletions
Original file line numberDiff line numberDiff line change
@@ -7,20 +7,14 @@
77

88
import pytest
99
from langchain.chains import (
10-
ConversationalRetrievalChain,
1110
ConversationChain,
1211
LLMChain,
13-
RetrievalQA,
1412
SimpleSequentialChain,
1513
)
1614
from langchain.chains.openai_functions import create_openai_fn_chain
1715
from langchain.memory import ConversationBufferMemory
1816
from langchain.prompts import ChatPromptTemplate, PromptTemplate
1917
from langchain.schema import HumanMessage, SystemMessage
20-
from langchain.text_splitter import CharacterTextSplitter
21-
from langchain_community.document_loaders import TextLoader
22-
from langchain_community.embeddings import OpenAIEmbeddings
23-
from langchain_community.vectorstores import Chroma
2418
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
2519
from langchain_core.language_models.llms import LLM
2620
from langchain_core.output_parsers import StrOutputParser
@@ -35,7 +29,6 @@
3529
from langfuse._client.client import Langfuse
3630
from langfuse.langchain import CallbackHandler
3731
from langfuse.langchain.CallbackHandler import LANGSMITH_TAG_HIDDEN
38-
from tests.api_wrapper import LangfuseAPI
3932
from tests.utils import create_uuid, encode_file_to_base64, get_api
4033

4134

@@ -226,89 +219,6 @@ def test_basic_chat_openai():
226219
assert generation.output is not None
227220

228221

229-
def test_callback_retriever():
230-
langfuse = Langfuse()
231-
232-
with langfuse.start_as_current_span(name="retriever_test") as span:
233-
trace_id = span.trace_id
234-
handler = CallbackHandler()
235-
236-
loader = TextLoader("./static/state_of_the_union.txt", encoding="utf8")
237-
llm = OpenAI()
238-
239-
documents = loader.load()
240-
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
241-
texts = text_splitter.split_documents(documents)
242-
243-
embeddings = OpenAIEmbeddings()
244-
docsearch = Chroma.from_documents(texts, embeddings)
245-
246-
query = "What did the president say about Ketanji Brown Jackson"
247-
248-
chain = RetrievalQA.from_chain_type(
249-
llm,
250-
retriever=docsearch.as_retriever(),
251-
)
252-
253-
chain.run(query, callbacks=[handler])
254-
255-
langfuse.flush()
256-
257-
trace = get_api().trace.get(trace_id)
258-
259-
assert len(trace.observations) == 6
260-
for observation in trace.observations:
261-
if observation.type == "GENERATION":
262-
assert observation.usage_details["input"] > 0
263-
assert observation.usage_details["output"] > 0
264-
assert observation.usage_details["total"] > 0
265-
assert observation.input is not None
266-
assert observation.input != ""
267-
assert observation.output is not None
268-
assert observation.output != ""
269-
270-
271-
def test_callback_retriever_with_sources():
272-
langfuse = Langfuse()
273-
274-
with langfuse.start_as_current_span(name="retriever_with_sources_test") as span:
275-
trace_id = span.trace_id
276-
handler = CallbackHandler()
277-
278-
loader = TextLoader("./static/state_of_the_union.txt", encoding="utf8")
279-
llm = OpenAI()
280-
281-
documents = loader.load()
282-
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
283-
texts = text_splitter.split_documents(documents)
284-
285-
embeddings = OpenAIEmbeddings()
286-
docsearch = Chroma.from_documents(texts, embeddings)
287-
288-
query = "What did the president say about Ketanji Brown Jackson"
289-
290-
chain = RetrievalQA.from_chain_type(
291-
llm, retriever=docsearch.as_retriever(), return_source_documents=True
292-
)
293-
294-
chain(query, callbacks=[handler])
295-
296-
langfuse.flush()
297-
298-
trace = get_api().trace.get(trace_id)
299-
300-
assert len(trace.observations) == 6
301-
for observation in trace.observations:
302-
if observation.type == "GENERATION":
303-
assert observation.usage_details["input"] > 0
304-
assert observation.usage_details["output"] > 0
305-
assert observation.usage_details["total"] > 0
306-
assert observation.input is not None
307-
assert observation.input != ""
308-
assert observation.output is not None
309-
assert observation.output != ""
310-
311-
312222
def test_callback_retriever_conversational_with_memory():
313223
langfuse = Langfuse()
314224

@@ -347,54 +257,6 @@ def test_callback_retriever_conversational_with_memory():
347257
assert generation.usage_details["output"] is not None
348258

349259

350-
def test_callback_retriever_conversational():
351-
langfuse = Langfuse()
352-
353-
with langfuse.start_as_current_span(name="retriever_conversational_test") as span:
354-
trace_id = span.trace_id
355-
api_wrapper = LangfuseAPI()
356-
handler = CallbackHandler()
357-
358-
loader = TextLoader("./static/state_of_the_union.txt", encoding="utf8")
359-
360-
documents = loader.load()
361-
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
362-
texts = text_splitter.split_documents(documents)
363-
364-
embeddings = OpenAIEmbeddings(openai_api_key=os.environ.get("OPENAI_API_KEY"))
365-
docsearch = Chroma.from_documents(texts, embeddings)
366-
367-
query = "What did the president say about Ketanji Brown Jackson"
368-
369-
chain = ConversationalRetrievalChain.from_llm(
370-
ChatOpenAI(
371-
openai_api_key=os.environ.get("OPENAI_API_KEY"),
372-
temperature=0.5,
373-
model="gpt-3.5-turbo-16k",
374-
),
375-
docsearch.as_retriever(search_kwargs={"k": 6}),
376-
return_source_documents=True,
377-
)
378-
379-
chain({"question": query, "chat_history": []}, callbacks=[handler])
380-
381-
handler.client.flush()
382-
383-
trace = api_wrapper.get_trace(trace_id)
384-
385-
# Add 1 to account for the wrapping span
386-
assert len(trace["observations"]) == 6
387-
for observation in trace["observations"]:
388-
if observation["type"] == "GENERATION":
389-
assert observation["promptTokens"] > 0
390-
assert observation["completionTokens"] > 0
391-
assert observation["totalTokens"] > 0
392-
assert observation["input"] is not None
393-
assert observation["input"] != ""
394-
assert observation["output"] is not None
395-
assert observation["output"] != ""
396-
397-
398260
def test_callback_simple_openai():
399261
langfuse = Langfuse()
400262

0 commit comments

Comments (0)