Skip to content

Commit d1ba80f

Browse files
Second task
1 parent 288a76b commit d1ba80f

10 files changed

Lines changed: 169 additions & 95 deletions

File tree

backend/__init__.py

Whitespace-only changes.
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# Popes' Education and Secular Jobs

## Pope Francis (Jorge Mario Bergoglio)

- Education: Philosophy and Theology
- Secular jobs: Chemistry lab technician, literature teacher

## Pope Benedict XVI (Joseph Ratzinger)

- Education: Philosophy and Theology
- Secular jobs: None (mostly academic)

## Pope John Paul II (Karol Wojtyła)

- Education: Philosophy, Theology, Literature
- Secular jobs: Actor, poet

## Pope John Paul I (Albino Luciani)

- Education: Theology
- Secular jobs: None

## Pope Paul VI (Giovanni Battista Montini)

- Education: Theology, Canon Law
- Secular jobs: Journalist (early career)
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# LangGraph Interrupt Example
2+
3+
LangGraph is a Python library for building stateful LLM applications
4+
using graphs or functional workflows.
5+
6+
This document shows how interrupts can be implemented
7+
using both the Functional API and the Graph API.
8+
9+
## Functional API
10+
11+
The Functional API allows defining agent logic as a Python function.
12+
Interrupts are typically implemented using control flow statements.
13+
14+
```python
15+
from langgraph.functional import agent
16+
17+
def my_agent():
18+
while True:
19+
inp = input(">>> ")
20+
if inp == "STOP":
21+
return "Interrupted"
22+
print(f"Echo: {inp}")

backend/examples/cli_research.py

Lines changed: 19 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,43 +1,29 @@
1+
import os
12
import argparse
2-
from langchain_core.messages import HumanMessage
33
from agent.graph import graph
4+
from agent.state import OverallState
5+
from langchain_core.messages import HumanMessage
46

5-
6-
def main() -> None:
7-
"""Run the research agent from the command line."""
8-
parser = argparse.ArgumentParser(description="Run the LangGraph research agent")
9-
parser.add_argument("question", help="Research question")
10-
parser.add_argument(
11-
"--initial-queries",
12-
type=int,
13-
default=3,
14-
help="Number of initial search queries",
15-
)
16-
parser.add_argument(
17-
"--max-loops",
18-
type=int,
19-
default=2,
20-
help="Maximum number of research loops",
21-
)
22-
parser.add_argument(
23-
"--reasoning-model",
24-
default="gemini-2.5-pro-preview-05-06",
25-
help="Model for the final answer",
26-
)
7+
def main():
8+
parser = argparse.ArgumentParser()
9+
parser.add_argument("question", nargs="?", default=None, help="Question to ask")
10+
parser.add_argument("--dir", required=True, help="Directory for local Markdown sources")
11+
parser.add_argument("--loops", type=int, default=3, help="Max research loops")
2712
args = parser.parse_args()
2813

29-
state = {
30-
"messages": [HumanMessage(content=args.question)],
31-
"initial_search_query_count": args.initial_queries,
32-
"max_research_loops": args.max_loops,
33-
"reasoning_model": args.reasoning_model,
34-
}
14+
state = OverallState(
15+
messages=[HumanMessage(content=args.question or "")],
16+
search_dir=args.dir,
17+
max_research_loops=args.loops,
18+
research_loop_count=0,
19+
is_sufficient=False,
20+
)
3521

3622
result = graph.invoke(state)
37-
messages = result.get("messages", [])
38-
if messages:
39-
print(messages[-1].content)
23+
24+
for msg in result["messages"]:
25+
print("\n" + msg.content)
4026

4127

4228
if __name__ == "__main__":
43-
main()
29+
main()

backend/src/__init__.py

Whitespace-only changes.

backend/src/agent/graph.py

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
from langgraph.graph import StateGraph, START, END
2-
from langchain_core.messages import AIMessage, HumanMessage
2+
from langchain_core.messages import AIMessage
33

44
from agent.state import OverallState
55
from agent.tools_and_schemas import SearchQueryList, Reflection
66
from agent.llm.groq import GroqLLM
77

8+
from .search.local_markdown import search_markdown
9+
810
llm = GroqLLM()
911

1012

@@ -17,31 +19,41 @@ def generate_search_queries(state: OverallState):
1719
queries = llm.invoke_structured(prompt, SearchQueryList)
1820

1921
return {
20-
"search_query": queries.query
22+
"search_query": [q.model_dump() for q in queries.query]
2123
}
2224

2325

2426
def web_research(state: OverallState):
25-
# ❗ тимчасовий fake research
26-
results = [f"Result for: {q}" for q in state["search_query"]]
27+
all_results = []
28+
29+
search_dir = state.get("search_dir")
30+
if not search_dir:
31+
return {"web_research_result": [], "sources_gathered": []}
32+
33+
for q in state["search_query"]:
34+
query_text = q["query"]
35+
matches = search_markdown(search_dir, query_text)
36+
all_results.extend(matches)
2737

2838
return {
29-
"web_research_result": results
39+
"web_research_result": all_results,
40+
"sources_gathered": []
3041
}
3142

3243

3344
def reflect(state: OverallState):
3445
prompt = (
35-
f"Question: {state['messages'][-1].content}\n\n"
36-
f"Research results:\n" + "\n".join(state["web_research_result"])
46+
f"Question:\n{state['messages'][-1].content}\n\n"
47+
f"Research:\n" + "\n".join(state["web_research_result"])
3748
)
3849

3950
reflection = llm.invoke_structured(prompt, Reflection)
4051

4152
return {
4253
"is_sufficient": reflection.is_sufficient,
43-
"knowledge_gap": reflection.knowledge_gap,
44-
"follow_up_queries": reflection.follow_up_queries,
54+
"follow_up_queries": [
55+
q.model_dump() for q in reflection.follow_up_queries
56+
]
4557
}
4658

4759

backend/src/agent/search/__init__.py

Whitespace-only changes.
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
from pathlib import Path
2+
import re
3+
from typing import List
4+
5+
6+
def text_to_paragraphs(text: str):
7+
8+
return [p.strip() for p in text.split("\n\n") if p.strip()]
9+
10+
def score_paragraph(query_tokens, paragraph):
11+
12+
p = paragraph.lower()
13+
return sum(1 for token in query_tokens if token in p)
14+
15+
def search_markdown(search_dir: str, query: str) -> List[str]:
16+
"""
17+
Very simple local markdown search:
18+
- scans all .md files
19+
- returns paragraphs containing query keywords
20+
"""
21+
22+
results: List[str] = []
23+
path = Path(search_dir)
24+
25+
if not path.exists():
26+
return results
27+
28+
keywords = [w.lower() for w in query.split() if len(w) > 3]
29+
30+
for md_file in path.rglob("*.md"):
31+
text = md_file.read_text(encoding="utf-8", errors="ignore")
32+
paragraphs = text.split("\n\n")
33+
34+
for p in paragraphs:
35+
content = p.strip()
36+
if not content:
37+
continue
38+
39+
score = sum(k in content.lower() for k in keywords)
40+
if score >= 1:
41+
results.append(
42+
f"[{md_file.name}]\n{content}"
43+
)
44+
45+
return results

backend/src/agent/state.py

Lines changed: 9 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -1,50 +1,21 @@
11
from __future__ import annotations
22

3-
from dataclasses import dataclass, field
4-
from typing import TypedDict
5-
6-
from langgraph.graph import add_messages
3+
from typing import TypedDict, List
74
from typing_extensions import Annotated
8-
9-
5+
from langgraph.graph import add_messages
106
import operator
117

128

13-
14-
159
class OverallState(TypedDict):
1610
messages: Annotated[list, add_messages]
17-
search_query: Annotated[list, operator.add]
18-
web_research_result: Annotated[list, operator.add]
19-
sources_gathered: Annotated[list, operator.add]
20-
initial_search_query_count: int
21-
max_research_loops: int
22-
research_loop_count: int
23-
reasoning_model: str
2411

12+
search_query: Annotated[List[dict], operator.add]
13+
web_research_result: Annotated[List[str], operator.add]
14+
sources_gathered: Annotated[List[str], operator.add]
2515

26-
class ReflectionState(TypedDict):
27-
is_sufficient: bool
28-
knowledge_gap: str
29-
follow_up_queries: Annotated[list, operator.add]
16+
initial_search_query_count: int
17+
max_research_loops: int
3018
research_loop_count: int
31-
number_of_ran_queries: int
32-
33-
34-
class Query(TypedDict):
35-
query: str
36-
rationale: str
37-
3819

39-
class QueryGenerationState(TypedDict):
40-
search_query: list[Query]
41-
42-
43-
class WebSearchState(TypedDict):
44-
search_query: str
45-
id: str
46-
47-
48-
@dataclass(kw_only=True)
49-
class SearchStateOutput:
50-
running_summary: str = field(default=None) # Final report
20+
reasoning_model: str
21+
search_dir: str

backend/src/agent/tools_and_schemas.py

Lines changed: 9 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2,22 +2,16 @@
22
from pydantic import BaseModel, Field
33

44

5+
class SearchQuery(BaseModel):
6+
query: str
7+
rationale: str
8+
9+
510
class SearchQueryList(BaseModel):
6-
query: List[str] = Field(
7-
description="A list of search queries to be used for web research."
8-
)
9-
rationale: str = Field(
10-
description="A brief explanation of why these queries are relevant to the research topic."
11-
)
11+
query: List[SearchQuery]
1212

1313

1414
class Reflection(BaseModel):
15-
is_sufficient: bool = Field(
16-
description="Whether the provided summaries are sufficient to answer the user's question."
17-
)
18-
knowledge_gap: str = Field(
19-
description="A description of what information is missing or needs clarification."
20-
)
21-
follow_up_queries: List[str] = Field(
22-
description="A list of follow-up queries to address the knowledge gap."
23-
)
15+
is_sufficient: bool = Field(..., description="Is research sufficient?")
16+
knowledge_gap: str = Field(..., description="What is missing?")
17+
follow_up_queries: List[SearchQuery] = Field(default_factory=list)

0 commit comments

Comments
 (0)