Skip to content

Commit 9f29f60

Browse files
wesm and claude authored
Add token usage tracking via agentsview (#525)
## Summary - Add `internal/tokens` package that shells out to `agentsview token-use <session-id>` to fetch per-session token consumption (peak context tokens + total output tokens) - Store token usage as a JSON blob in `review_jobs.token_usage` (SQLite migration + PostgreSQL v8 schema) - Collect token usage automatically after each job completes in the worker pool (best-effort, skips resumed sessions to avoid cumulative overcounting) - Thread `token_usage` through PostgreSQL sync (push and pull paths) so data is available across machines - Display token summary in TUI review screen header and `roborev show` CLI output (e.g. `118.0k ctx · 28.8k out`) - Add `roborev backfill-tokens` command to retroactively fetch token data for completed jobs that have session IDs but no stored usage 🤖 Generated with [Claude Code](https://claude.com/claude-code) --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 6ae4067 commit 9f29f60

15 files changed

Lines changed: 752 additions & 28 deletions

File tree

cmd/roborev/backfill_tokens.go

Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
package main
2+
3+
import (
4+
"context"
5+
"fmt"
6+
"log"
7+
"time"
8+
9+
"github.com/roborev-dev/roborev/internal/storage"
10+
"github.com/roborev-dev/roborev/internal/tokens"
11+
"github.com/spf13/cobra"
12+
)
13+
14+
func backfillTokensCmd() *cobra.Command {
15+
var dryRun bool
16+
17+
cmd := &cobra.Command{
18+
Use: "backfill-tokens",
19+
Short: "Backfill token usage for completed jobs via agentsview",
20+
Long: `Scan completed jobs that have a session ID but no token usage data,
21+
and attempt to fetch token consumption from agentsview.
22+
23+
This is best-effort: jobs whose session files have been deleted
24+
will be skipped.`,
25+
RunE: func(cmd *cobra.Command, args []string) error {
26+
db, err := storage.Open(storage.DefaultDBPath())
27+
if err != nil {
28+
return fmt.Errorf("open database: %w", err)
29+
}
30+
defer db.Close()
31+
32+
// Query all jobs (no status filter) and filter for
33+
// terminal states that could have token data.
34+
jobs, err := db.ListJobs("", "", 0, 0)
35+
if err != nil {
36+
return fmt.Errorf("list jobs: %w", err)
37+
}
38+
39+
candidates := backfillCandidates(jobs)
40+
41+
var total, updated, skipped, failed int
42+
for _, job := range candidates {
43+
total++
44+
45+
ctx, cancel := context.WithTimeout(
46+
context.Background(), 15*time.Second,
47+
)
48+
usage, fetchErr := tokens.FetchForSession(
49+
ctx, job.SessionID,
50+
)
51+
cancel()
52+
53+
if fetchErr != nil {
54+
log.Printf(
55+
"job %d: fetch error: %v", job.ID, fetchErr,
56+
)
57+
failed++
58+
continue
59+
}
60+
if usage == nil {
61+
skipped++
62+
continue
63+
}
64+
65+
if dryRun {
66+
fmt.Printf(
67+
"job %d (%s): %s\n",
68+
job.ID, job.Agent, usage.FormatSummary(),
69+
)
70+
updated++
71+
continue
72+
}
73+
74+
j := tokens.ToJSON(usage)
75+
if err := db.SaveJobTokenUsage(job.ID, j); err != nil {
76+
log.Printf(
77+
"job %d: save error: %v", job.ID, err,
78+
)
79+
failed++
80+
continue
81+
}
82+
updated++
83+
fmt.Printf(
84+
"job %d (%s): %s\n",
85+
job.ID, job.Agent, usage.FormatSummary(),
86+
)
87+
}
88+
89+
action := "Updated"
90+
if dryRun {
91+
action = "Would update"
92+
}
93+
fmt.Printf(
94+
"\n%s %d/%d jobs (%d skipped, %d failed)\n",
95+
action, updated, total, skipped, failed,
96+
)
97+
return nil
98+
},
99+
}
100+
101+
cmd.Flags().BoolVar(
102+
&dryRun, "dry-run", false,
103+
"show what would be updated without writing",
104+
)
105+
return cmd
106+
}
107+
108+
// backfillCandidates filters jobs to those eligible for token
109+
// backfill: completed, has a session ID, no existing token data,
110+
// and the session was not reused by another started job.
111+
func backfillCandidates(
112+
jobs []storage.ReviewJob,
113+
) []storage.ReviewJob {
114+
// Count jobs that actually started per session ID. If
115+
// multiple jobs ran on the same session, it was resumed
116+
// and agentsview totals are cumulative — skip to avoid
117+
// overcounting.
118+
sessionCount := make(map[string]int)
119+
for _, job := range jobs {
120+
if job.SessionID != "" && job.StartedAt != nil {
121+
sessionCount[job.SessionID]++
122+
}
123+
}
124+
125+
var out []storage.ReviewJob
126+
for _, job := range jobs {
127+
if !job.HasViewableOutput() {
128+
continue
129+
}
130+
if job.TokenUsage != "" {
131+
continue
132+
}
133+
if job.SessionID == "" {
134+
continue
135+
}
136+
if sessionCount[job.SessionID] > 1 {
137+
continue
138+
}
139+
out = append(out, job)
140+
}
141+
return out
142+
}
Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
package main
2+
3+
import (
4+
"testing"
5+
"time"
6+
7+
"github.com/roborev-dev/roborev/internal/storage"
8+
"github.com/stretchr/testify/assert"
9+
)
10+
11+
func timePtr(t time.Time) *time.Time { return &t }
12+
13+
func TestBackfillCandidates(t *testing.T) {
14+
now := time.Now()
15+
16+
tests := []struct {
17+
name string
18+
jobs []storage.ReviewJob
19+
wantIDs []int64
20+
}{
21+
{
22+
name: "empty input",
23+
jobs: nil,
24+
wantIDs: nil,
25+
},
26+
{
27+
name: "single completed job with session",
28+
jobs: []storage.ReviewJob{
29+
{
30+
ID: 1, Status: storage.JobStatusDone,
31+
SessionID: "s1", StartedAt: timePtr(now),
32+
},
33+
},
34+
wantIDs: []int64{1},
35+
},
36+
{
37+
name: "skip job that already has token data",
38+
jobs: []storage.ReviewJob{
39+
{
40+
ID: 1, Status: storage.JobStatusDone,
41+
SessionID: "s1", StartedAt: timePtr(now),
42+
TokenUsage: `{"peak_context_tokens":100}`,
43+
},
44+
},
45+
wantIDs: nil,
46+
},
47+
{
48+
name: "skip job with no session ID",
49+
jobs: []storage.ReviewJob{
50+
{
51+
ID: 1, Status: storage.JobStatusDone,
52+
StartedAt: timePtr(now),
53+
},
54+
},
55+
wantIDs: nil,
56+
},
57+
{
58+
name: "skip queued job",
59+
jobs: []storage.ReviewJob{
60+
{
61+
ID: 1, Status: storage.JobStatusQueued,
62+
SessionID: "s1",
63+
},
64+
},
65+
wantIDs: nil,
66+
},
67+
{
68+
name: "resumed session: two started jobs share session",
69+
jobs: []storage.ReviewJob{
70+
{
71+
ID: 1, Status: storage.JobStatusDone,
72+
SessionID: "s1", StartedAt: timePtr(now),
73+
},
74+
{
75+
ID: 2, Status: storage.JobStatusDone,
76+
SessionID: "s1", StartedAt: timePtr(now),
77+
},
78+
},
79+
wantIDs: nil,
80+
},
81+
{
82+
name: "canceled-before-start sibling does not block backfill",
83+
jobs: []storage.ReviewJob{
84+
{
85+
ID: 1, Status: storage.JobStatusDone,
86+
SessionID: "s1", StartedAt: timePtr(now),
87+
},
88+
{
89+
ID: 2, Status: storage.JobStatusCanceled,
90+
SessionID: "s1", StartedAt: nil,
91+
},
92+
},
93+
wantIDs: []int64{1},
94+
},
95+
{
96+
name: "canceled-after-start sibling blocks backfill",
97+
jobs: []storage.ReviewJob{
98+
{
99+
ID: 1, Status: storage.JobStatusDone,
100+
SessionID: "s1", StartedAt: timePtr(now),
101+
},
102+
{
103+
ID: 2, Status: storage.JobStatusCanceled,
104+
SessionID: "s1", StartedAt: timePtr(now),
105+
},
106+
},
107+
wantIDs: nil,
108+
},
109+
{
110+
name: "failed-after-start sibling blocks backfill",
111+
jobs: []storage.ReviewJob{
112+
{
113+
ID: 1, Status: storage.JobStatusDone,
114+
SessionID: "s1", StartedAt: timePtr(now),
115+
},
116+
{
117+
ID: 2, Status: storage.JobStatusFailed,
118+
SessionID: "s1", StartedAt: timePtr(now),
119+
},
120+
},
121+
wantIDs: nil,
122+
},
123+
{
124+
name: "independent sessions are both eligible",
125+
jobs: []storage.ReviewJob{
126+
{
127+
ID: 1, Status: storage.JobStatusDone,
128+
SessionID: "s1", StartedAt: timePtr(now),
129+
},
130+
{
131+
ID: 2, Status: storage.JobStatusDone,
132+
SessionID: "s2", StartedAt: timePtr(now),
133+
},
134+
},
135+
wantIDs: []int64{1, 2},
136+
},
137+
{
138+
name: "applied/rebased jobs are eligible",
139+
jobs: []storage.ReviewJob{
140+
{
141+
ID: 1, Status: storage.JobStatusApplied,
142+
SessionID: "s1", StartedAt: timePtr(now),
143+
},
144+
{
145+
ID: 2, Status: storage.JobStatusRebased,
146+
SessionID: "s2", StartedAt: timePtr(now),
147+
},
148+
},
149+
wantIDs: []int64{1, 2},
150+
},
151+
}
152+
153+
for _, tt := range tests {
154+
t.Run(tt.name, func(t *testing.T) {
155+
got := backfillCandidates(tt.jobs)
156+
var gotIDs []int64
157+
for _, j := range got {
158+
gotIDs = append(gotIDs, j.ID)
159+
}
160+
assert.Equal(t, tt.wantIDs, gotIDs)
161+
})
162+
}
163+
}

cmd/roborev/main.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ func main() {
5353
rootCmd.AddCommand(summaryCmd())
5454
rootCmd.AddCommand(backfillVerdictsCmd())
5555
rootCmd.AddCommand(configCmd())
56+
rootCmd.AddCommand(backfillTokensCmd())
5657
rootCmd.AddCommand(updateCmd())
5758
rootCmd.AddCommand(versionCmd())
5859

cmd/roborev/show.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ import (
1010

1111
"github.com/roborev-dev/roborev/internal/git"
1212
"github.com/roborev-dev/roborev/internal/storage"
13+
"github.com/roborev-dev/roborev/internal/tokens"
1314
"github.com/spf13/cobra"
1415
)
1516

@@ -125,6 +126,11 @@ Examples:
125126
} else {
126127
fmt.Printf("Review for %s (job %d, by %s)\n", displayRef, review.JobID, review.Agent)
127128
}
129+
if review.Job != nil {
130+
if tu := tokens.ParseJSON(review.Job.TokenUsage); tu != nil {
131+
fmt.Printf("Tokens: %s\n", tu.FormatSummary())
132+
}
133+
}
128134
fmt.Println(strings.Repeat("-", 60))
129135
if showPrompt {
130136
fmt.Println(review.Prompt)

cmd/roborev/tui/render_review.go

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"github.com/charmbracelet/lipgloss"
99
xansi "github.com/charmbracelet/x/ansi"
1010
"github.com/mattn/go-runewidth"
11+
"github.com/roborev-dev/roborev/internal/tokens"
1112
)
1213

1314
func (m model) renderReviewView() string {
@@ -56,9 +57,13 @@ func (m model) renderReviewView() string {
5657
b.WriteString(statusStyle.Render(locationLine))
5758
b.WriteString("\x1b[K") // Clear to end of line
5859

59-
// Show verdict and closed status on next line (skip verdict for fix jobs)
60+
// Show verdict, closed status, and token usage on next line (skip verdict for fix jobs)
6061
hasVerdict := review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob()
61-
if hasVerdict || review.Closed {
62+
tokenSummary := ""
63+
if tu := tokens.ParseJSON(review.Job.TokenUsage); tu != nil {
64+
tokenSummary = tu.FormatSummary()
65+
}
66+
if hasVerdict || review.Closed || tokenSummary != "" {
6267
b.WriteString("\n")
6368
if hasVerdict {
6469
v := *review.Job.Verdict
@@ -75,6 +80,12 @@ func (m model) renderReviewView() string {
7580
}
7681
b.WriteString(closedStyle.Render("[CLOSED]"))
7782
}
83+
if tokenSummary != "" {
84+
if hasVerdict || review.Closed {
85+
b.WriteString(" ")
86+
}
87+
b.WriteString(statusStyle.Render("[" + tokenSummary + "]"))
88+
}
7889
b.WriteString("\x1b[K") // Clear to end of line
7990
}
8091
b.WriteString("\n")
@@ -140,8 +151,9 @@ func (m model) renderReviewView() string {
140151
// Reserve title, location, footer status, help, and optional verdict.
141152
headerHeight := titleLines + locationLines + 1 + helpLines
142153
hasVerdict := review.Job != nil && review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob()
143-
if hasVerdict || review.Closed {
144-
headerHeight++ // Add 1 for verdict/closed line
154+
hasTokens := review.Job != nil && tokens.ParseJSON(review.Job.TokenUsage) != nil
155+
if hasVerdict || review.Closed || hasTokens {
156+
headerHeight++ // Add 1 for verdict/closed/tokens line
145157
}
146158
panelReserve := 0
147159
if m.reviewFixPanelOpen {

0 commit comments

Comments
 (0)