add posthog analytics for llm usage and auth events

Captures per-LLM-call token usage tagged by use case (copilot chat,
track block, meeting note, knowledge sync), plus sign-in / sign-out
and identify events. The renderer and main process share one PostHog
identity, so events from either process resolve to the same user.

See apps/x/ANALYTICS.md for the event catalog, person properties,
use-case taxonomy, and how to add new events.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Ramnique Singh 2026-04-28 19:53:40 +05:30
parent d42fb26bcc
commit 43c1ba719f
31 changed files with 625 additions and 36 deletions

View file

@@ -37,6 +37,7 @@
"openid-client": "^6.8.1",
"papaparse": "^5.5.3",
"pdf-parse": "^2.4.5",
"posthog-node": "^4.18.0",
"react": "^19.2.3",
"xlsx": "^0.18.5",
"yaml": "^2.8.2",

View file

@@ -164,7 +164,11 @@ async function runAgent(
try {
// Create a new run via core (resolves agent + default model+provider).
const run = await createRun({ agentId: agentName });
const run = await createRun({
agentId: agentName,
useCase: 'copilot_chat',
subUseCase: 'scheduled',
});
console.log(`[AgentRunner] Created run ${run.id} for agent ${agentName}`);
// Add the starting message as a user message

View file

@@ -26,6 +26,8 @@ import { IRunsLock } from "../runs/lock.js";
import { IAbortRegistry } from "../runs/abort-registry.js";
import { PrefixLogger } from "@x/shared";
import { parse } from "yaml";
import { captureLlmUsage } from "../analytics/usage.js";
import { enterUseCase, type UseCase } from "../analytics/use_case.js";
import { getRaw as getNoteCreationRaw } from "../knowledge/note_creation.js";
import { getRaw as getLabelingAgentRaw } from "../knowledge/labeling_agent.js";
import { getRaw as getNoteTaggingAgentRaw } from "../knowledge/note_tagging_agent.js";
@@ -650,6 +652,8 @@ export class AgentState {
agentName: string | null = null;
runModel: string | null = null;
runProvider: string | null = null;
runUseCase: UseCase | null = null;
runSubUseCase: string | null = null;
messages: z.infer<typeof MessageList> = [];
lastAssistantMsg: z.infer<typeof AssistantMessage> | null = null;
subflowStates: Record<string, AgentState> = {};
@@ -765,6 +769,8 @@
this.agentName = event.agentName;
this.runModel = event.model;
this.runProvider = event.provider;
this.runUseCase = event.useCase ?? null;
this.runSubUseCase = event.subUseCase ?? null;
break;
case "spawn-subflow":
// Seed the subflow state with its agent so downstream loadAgent works.
@@ -775,6 +781,8 @@
this.subflowStates[event.toolCallId].agentName = event.agentName;
this.subflowStates[event.toolCallId].runModel = this.runModel;
this.subflowStates[event.toolCallId].runProvider = this.runProvider;
this.subflowStates[event.toolCallId].runUseCase = this.runUseCase;
this.subflowStates[event.toolCallId].runSubUseCase = this.runSubUseCase;
break;
case "message":
this.messages.push(event.message);
@@ -881,6 +889,14 @@ export async function* streamAgent({
const model = provider.languageModel(modelId);
logger.log(`using model: ${modelId} (provider: ${state.runProvider})`);
// Install use-case context for tool-internal LLM calls (e.g. parseFile)
// so they can tag their `llm_usage` events with the parent run's category.
enterUseCase({
useCase: state.runUseCase ?? "copilot_chat",
...(state.runSubUseCase ? { subUseCase: state.runSubUseCase } : {}),
...(state.agentName ? { agentName: state.agentName } : {}),
});
let loopCounter = 0;
let voiceInput = false;
let voiceOutput: 'summary' | 'full' | null = null;
@@ -1114,6 +1130,13 @@
instructionsWithDateTime,
tools,
signal,
{
useCase: state.runUseCase ?? "copilot_chat",
...(state.runSubUseCase ? { subUseCase: state.runSubUseCase } : {}),
agentName: state.agentName ?? undefined,
modelId,
providerName: state.runProvider!,
},
)) {
messageBuilder.ingest(event);
yield* processEvent({
@@ -1201,12 +1224,21 @@ export async function* streamAgent({
}
}
interface StreamLlmAnalytics {
useCase: UseCase;
subUseCase?: string;
agentName?: string;
modelId: string;
providerName: string;
}
async function* streamLlm(
model: LanguageModel,
messages: z.infer<typeof MessageList>,
instructions: string,
tools: ToolSet,
signal?: AbortSignal,
analytics?: StreamLlmAnalytics,
): AsyncGenerator<z.infer<typeof LlmStepStreamEvent>, void, unknown> {
const converted = convertFromMessages(messages);
console.log(`! SENDING payload to model: `, JSON.stringify(converted))
@@ -1277,6 +1309,16 @@ async function* streamLlm(
};
break;
case "finish-step":
if (analytics) {
captureLlmUsage({
useCase: analytics.useCase,
...(analytics.subUseCase ? { subUseCase: analytics.subUseCase } : {}),
...(analytics.agentName ? { agentName: analytics.agentName } : {}),
model: analytics.modelId,
provider: analytics.providerName,
usage: event.usage,
});
}
yield {
type: "finish-step",
usage: event.usage,

View file

@@ -0,0 +1,37 @@
import fs from 'node:fs';
import path from 'node:path';
import { randomUUID } from 'node:crypto';
import { WorkDir } from '../config/config.js';
const INSTALLATION_PATH = path.join(WorkDir, 'config', 'installation.json');
let cached: string | null = null;
export function getInstallationId(): string {
if (cached) return cached;
try {
if (fs.existsSync(INSTALLATION_PATH)) {
const raw = fs.readFileSync(INSTALLATION_PATH, 'utf-8');
const parsed = JSON.parse(raw) as { installationId?: string };
if (parsed.installationId && typeof parsed.installationId === 'string') {
cached = parsed.installationId;
return cached;
}
}
} catch (err) {
console.error('[Analytics] Failed to read installation.json:', err);
}
const id = randomUUID();
try {
const dir = path.dirname(INSTALLATION_PATH);
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
}
fs.writeFileSync(INSTALLATION_PATH, JSON.stringify({ installationId: id }, null, 2));
} catch (err) {
console.error('[Analytics] Failed to write installation.json:', err);
}
cached = id;
return id;
}
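
A quick usage sketch: the ID is minted once, persisted under `WorkDir/config/installation.json`, and cached in memory, so repeated calls (and restarts pointed at the same WorkDir) resolve to the same value.

```ts
import { getInstallationId } from './installation.js';

// First call reads installation.json or mints and persists a new UUID;
// later calls return the in-memory cached value.
const anonId = getInstallationId();
console.log(anonId === getInstallationId()); // true: cached after the first call
```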

View file

@@ -0,0 +1,90 @@
import { PostHog } from 'posthog-node';
import { getInstallationId } from './installation.js';
import { API_URL } from '../config/env.js';
// Build-time injected via esbuild `define` (apps/main/bundle.mjs).
// In dev/tsc, fall back to process.env so local runs work too.
const POSTHOG_KEY = process.env.POSTHOG_KEY ?? process.env.VITE_PUBLIC_POSTHOG_KEY ?? '';
const POSTHOG_HOST = process.env.POSTHOG_HOST ?? process.env.VITE_PUBLIC_POSTHOG_HOST ?? 'https://us.i.posthog.com';
let client: PostHog | null = null;
let initAttempted = false;
let identifiedUserId: string | null = null;
function getClient(): PostHog | null {
if (initAttempted) return client;
initAttempted = true;
if (!POSTHOG_KEY) {
console.log('[Analytics] POSTHOG_KEY not set; analytics disabled');
return null;
}
try {
client = new PostHog(POSTHOG_KEY, {
host: POSTHOG_HOST,
flushAt: 20,
flushInterval: 10_000,
});
// Tag the install with api_url as a person property up-front,
// so anonymous users are also segmentable by environment (api_url
// distinguishes prod / staging / custom — meaning is assigned in PostHog).
client.identify({
distinctId: getInstallationId(),
properties: { api_url: API_URL },
});
} catch (err) {
console.error('[Analytics] Failed to init PostHog:', err);
client = null;
}
return client;
}
function activeDistinctId(): string {
return identifiedUserId ?? getInstallationId();
}
export function capture(event: string, properties?: Record<string, unknown>): void {
const ph = getClient();
if (!ph) return;
try {
ph.capture({
distinctId: activeDistinctId(),
event,
properties,
});
} catch (err) {
console.error('[Analytics] capture failed:', err);
}
}
export function identify(userId: string, properties?: Record<string, unknown>): void {
const ph = getClient();
if (!ph) return;
try {
// Alias the anonymous installation ID to the rowboat user ID so historical
// anonymous events are linked to the identified user.
ph.alias({ distinctId: userId, alias: getInstallationId() });
ph.identify({
distinctId: userId,
properties: {
...properties,
api_url: API_URL,
},
});
identifiedUserId = userId;
} catch (err) {
console.error('[Analytics] identify failed:', err);
}
}
export function reset(): void {
identifiedUserId = null;
}
export async function shutdown(): Promise<void> {
if (!client) return;
try {
await client.shutdown();
} catch (err) {
console.error('[Analytics] shutdown failed:', err);
}
}
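
To connect this to the auth events mentioned in the commit message, a hedged sketch of how a sign-in / sign-out flow might call these helpers; the event names and function signatures here are illustrative (the real catalog lives in apps/x/ANALYTICS.md):

```ts
// Hypothetical wiring in the auth layer; names are illustrative.
import { capture, identify, reset } from '../analytics/posthog.js';

function onSignedIn(userId: string, email: string): void {
  // Alias + identify link the anonymous installation ID to the user,
  // so earlier anonymous events resolve to the same person.
  identify(userId, { email });
  capture('signed_in');
}

function onSignedOut(): void {
  capture('signed_out');
  // After reset(), events fall back to the installation ID as distinct_id.
  reset();
}
```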

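The "build-time injected" comment above refers to esbuild's `define` option. A minimal sketch of what the corresponding entries in apps/main/bundle.mjs might look like; the actual bundle script is not part of this diff, so treat every detail here as an assumption:

```ts
// Hypothetical excerpt; the real apps/main/bundle.mjs is not shown in this commit.
import * as esbuild from 'esbuild';

await esbuild.build({
  entryPoints: ['src/index.ts'],
  bundle: true,
  platform: 'node',
  // Inline key/host at build time so packaged builds need no env vars.
  define: {
    'process.env.POSTHOG_KEY': JSON.stringify(process.env.POSTHOG_KEY ?? ''),
    'process.env.POSTHOG_HOST': JSON.stringify(process.env.POSTHOG_HOST ?? ''),
  },
});
```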
View file

@@ -0,0 +1,38 @@
import { capture } from './posthog.js';
import type { UseCase } from './use_case.js';
// Shape compatible with ai-sdk v5 `LanguageModelUsage`.
// All fields are optional because providers report subsets.
export interface LlmUsageInput {
inputTokens?: number;
outputTokens?: number;
totalTokens?: number;
reasoningTokens?: number;
cachedInputTokens?: number;
}
export interface CaptureLlmUsageArgs {
useCase: UseCase;
subUseCase?: string;
agentName?: string;
model: string;
provider: string;
usage: LlmUsageInput | undefined;
}
export function captureLlmUsage(args: CaptureLlmUsageArgs): void {
const usage = args.usage ?? {};
const properties: Record<string, unknown> = {
use_case: args.useCase,
model: args.model,
provider: args.provider,
input_tokens: usage.inputTokens ?? 0,
output_tokens: usage.outputTokens ?? 0,
total_tokens: usage.totalTokens ?? (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0),
};
if (args.subUseCase) properties.sub_use_case = args.subUseCase;
if (args.agentName) properties.agent_name = args.agentName;
if (usage.cachedInputTokens != null) properties.cached_input_tokens = usage.cachedInputTokens;
if (usage.reasoningTokens != null) properties.reasoning_tokens = usage.reasoningTokens;
capture('llm_usage', properties);
}
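
A usage sketch grounded in the helper above: when a provider reports only input and output tokens, `total_tokens` falls back to their sum, and the optional fields are omitted rather than sent as nulls. Model and provider strings are illustrative.

```ts
import { captureLlmUsage } from './usage.js';

captureLlmUsage({
  useCase: 'meeting_note',
  model: 'gpt-4.1-mini', // illustrative values
  provider: 'openai',
  usage: { inputTokens: 1200, outputTokens: 300 }, // no totalTokens reported
});
// Emits `llm_usage` with:
//   use_case: 'meeting_note', model: 'gpt-4.1-mini', provider: 'openai',
//   input_tokens: 1200, output_tokens: 300, total_tokens: 1500
// sub_use_case, agent_name, and cached/reasoning token fields are absent.
```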

View file

@@ -0,0 +1,28 @@
import { AsyncLocalStorage } from 'node:async_hooks';
export type UseCase = 'copilot_chat' | 'track_block' | 'meeting_note' | 'knowledge_sync';
export interface UseCaseContext {
useCase: UseCase;
subUseCase?: string;
agentName?: string;
}
const storage = new AsyncLocalStorage<UseCaseContext>();
export function withUseCase<T>(ctx: UseCaseContext, fn: () => T): T {
return storage.run(ctx, fn);
}
/**
* Permanently install a use-case context for the current async chain.
* Use inside generator functions where wrapping with `withUseCase()` doesn't
* compose. Child async work (e.g. tool execution) will inherit it.
*/
export function enterUseCase(ctx: UseCaseContext): void {
storage.enterWith(ctx);
}
export function getCurrentUseCase(): UseCaseContext | undefined {
return storage.getStore();
}
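
To illustrate the two entry points: `withUseCase` scopes the context to a callback and unwinds automatically, while `enterUseCase` installs it for the remainder of the current async chain, which is why the generator-based `streamAgent` uses the latter. A minimal sketch:

```ts
import { withUseCase, enterUseCase, getCurrentUseCase } from './use_case.js';

// Callback style: context is visible inside the function and any async
// work it awaits, then unwinds when the callback returns.
await withUseCase({ useCase: 'knowledge_sync', subUseCase: 'tag_notes' }, async () => {
  getCurrentUseCase(); // { useCase: 'knowledge_sync', subUseCase: 'tag_notes' }
});
getCurrentUseCase(); // undefined again

// Generator style: enterWith() persists the context for everything that
// follows on this async chain, including tools the generator invokes.
async function* streamish() {
  enterUseCase({ useCase: 'copilot_chat' });
  yield 1;
  getCurrentUseCase(); // still { useCase: 'copilot_chat' }
}
```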

View file

@@ -22,6 +22,8 @@ import type { ToolContext } from "./exec-tool.js";
import { generateText } from "ai";
import { createProvider } from "../../models/models.js";
import { getDefaultModelAndProvider, resolveProviderConfig } from "../../models/defaults.js";
import { captureLlmUsage } from "../../analytics/usage.js";
import { getCurrentUseCase } from "../../analytics/use_case.js";
import { isSignedIn } from "../../account/account.js";
import { getAccessToken } from "../../auth/tokens.js";
import { API_URL } from "../../config/env.js";
@@ -764,6 +766,16 @@ export const BuiltinTools: z.infer<typeof BuiltinToolsSchema> = {
],
});
const ctx = getCurrentUseCase();
captureLlmUsage({
useCase: ctx?.useCase ?? 'copilot_chat',
subUseCase: 'file_parse',
...(ctx?.agentName ? { agentName: ctx.agentName } : {}),
model: modelId,
provider: providerName,
usage: response.usage,
});
return {
success: true,
fileName,

View file

@@ -1,2 +1,2 @@
export const API_URL =
process.env.API_URL || 'https://api.x.rowboatlabs.com';
process.env.API_URL || 'https://api.x.rowboatlabs.com';

View file

@@ -306,7 +306,12 @@ async function processAgentNotes(): Promise<void> {
const timestamp = new Date().toISOString();
const message = `Current timestamp: ${timestamp}\n\nProcess the following source material and update the Agent Notes folder accordingly.\n\n${messageParts.join('\n\n')}`;
const agentRun = await createRun({ agentId: AGENT_ID, model: await getKgModel() });
const agentRun = await createRun({
agentId: AGENT_ID,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'agent_notes',
});
await createMessage(agentRun.id, message);
await waitForRunCompletion(agentRun.id);

View file

@@ -252,6 +252,8 @@ async function createNotesFromBatch(
// Create a run for the note creation agent
const run = await createRun({
agentId: NOTE_CREATION_AGENT,
useCase: 'knowledge_sync',
subUseCase: 'build_graph',
});
const suggestedTopicsContent = readSuggestedTopicsFile();

View file

@@ -10,6 +10,7 @@ import type { IModelConfigRepo } from '../models/repo.js';
import { createProvider } from '../models/models.js';
import { inlineTask } from '@x/shared';
import { extractAgentResponse, waitForRunCompletion } from '../agents/utils.js';
import { captureLlmUsage } from '../analytics/usage.js';
const SYNC_INTERVAL_MS = 15 * 1000; // 15 seconds
const INLINE_TASK_AGENT = 'inline_task_agent';
@@ -468,7 +469,12 @@ async function processInlineTasks(): Promise<void> {
console.log(`[InlineTasks] Running task: "${task.instruction.slice(0, 80)}..."`);
try {
const run = await createRun({ agentId: INLINE_TASK_AGENT, model: await getKgModel() });
const run = await createRun({
agentId: INLINE_TASK_AGENT,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'inline_task_run',
});
const message = [
`Execute the following instruction from the note "${relativePath}":`,
@@ -548,7 +554,12 @@ export async function processRowboatInstruction(
scheduleLabel: string | null;
response: string | null;
}> {
const run = await createRun({ agentId: INLINE_TASK_AGENT, model: await getKgModel() });
const run = await createRun({
agentId: INLINE_TASK_AGENT,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'inline_task_run',
});
const message = [
`Process the following @rowboat instruction from the note "${notePath}":`,
@@ -659,6 +670,14 @@ Respond with ONLY valid JSON: either a schedule object or null. No other text.`;
prompt: instruction,
});
captureLlmUsage({
useCase: 'knowledge_sync',
subUseCase: 'inline_task_classify',
model: config.model,
provider: config.provider.flavor,
usage: result.usage,
});
let text = result.text.trim();
console.log('[classifySchedule] LLM response:', text);
// Strip markdown code fences if the LLM wraps the JSON

View file

@@ -73,6 +73,8 @@ async function labelEmailBatch(
const run = await createRun({
agentId: LABELING_AGENT,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'label_emails',
});
let message = `Label the following ${files.length} email files by prepending YAML frontmatter.\n\n`;

View file

@@ -4,6 +4,7 @@ import { generateText } from 'ai';
import { createProvider } from '../models/models.js';
import { getDefaultModelAndProvider, getMeetingNotesModel, resolveProviderConfig } from '../models/defaults.js';
import { WorkDir } from '../config/config.js';
import { captureLlmUsage } from '../analytics/usage.js';
const CALENDAR_SYNC_DIR = path.join(WorkDir, 'calendar_sync');
@@ -157,5 +158,12 @@ export async function summarizeMeeting(transcript: string, meetingStartTime?: st
prompt,
});
captureLlmUsage({
useCase: 'meeting_note',
model: modelId,
provider: providerName,
usage: result.usage,
});
return result.text.trim();
}

View file

@@ -86,6 +86,8 @@ async function tagNoteBatch(
const run = await createRun({
agentId: NOTE_TAGGING_AGENT,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'tag_notes',
});
let message = `Tag the following ${files.length} knowledge notes by prepending YAML frontmatter with appropriate tags.\n\n`;

View file

@@ -3,6 +3,7 @@ import { trackBlock, PrefixLogger } from '@x/shared';
import type { KnowledgeEvent } from '@x/shared/dist/track-block.js';
import { createProvider } from '../../models/models.js';
import { getDefaultModelAndProvider, getTrackBlockModel, resolveProviderConfig } from '../../models/defaults.js';
import { captureLlmUsage } from '../../analytics/usage.js';
const log = new PrefixLogger('TrackRouting');
@@ -34,10 +35,14 @@
- For each candidate, return BOTH trackId and filePath exactly as given. trackIds are not globally unique.`;
async function resolveModel() {
const model = await getTrackBlockModel();
const modelId = await getTrackBlockModel();
const { provider } = await getDefaultModelAndProvider();
const config = await resolveProviderConfig(provider);
return createProvider(config).languageModel(model);
return {
model: createProvider(config).languageModel(modelId),
modelId,
providerName: provider,
};
}
function buildRoutingPrompt(event: KnowledgeEvent, batch: ParsedTrack[]): string {
@@ -84,19 +89,26 @@
log.log(`Routing event ${event.id} against ${filtered.length} track(s)`);
const model = await resolveModel();
const { model, modelId, providerName } = await resolveModel();
const candidateKeys = new Set<string>();
for (let i = 0; i < filtered.length; i += BATCH_SIZE) {
const batch = filtered.slice(i, i + BATCH_SIZE);
try {
const { object } = await generateObject({
const result = await generateObject({
model,
system: ROUTING_SYSTEM_PROMPT,
prompt: buildRoutingPrompt(event, batch),
schema: trackBlock.Pass1OutputSchema,
});
for (const c of object.candidates) {
captureLlmUsage({
useCase: 'track_block',
subUseCase: 'routing',
model: modelId,
provider: providerName,
usage: result.usage,
});
for (const c of result.object.candidates) {
candidateKeys.add(trackKey(c.trackId, c.filePath));
}
} catch (err) {

View file

@@ -110,6 +110,8 @@ export async function triggerTrackUpdate(
agentId: 'track-run',
model,
...(track.track.provider ? { provider: track.track.provider } : {}),
useCase: 'track_block',
subUseCase: 'run',
});
// Set lastRunAt and lastRunId immediately (before agent executes) so

View file

@@ -43,6 +43,8 @@ async function runAgent(agentName: string): Promise<void> {
const run = await createRun({
agentId: agentName,
model: await getKgModel(),
useCase: 'knowledge_sync',
subUseCase: 'pre_built',
});
// Build trigger message with user context

View file

@@ -5,7 +5,7 @@ import path from "path";
import fsp from "fs/promises";
import fs from "fs";
import readline from "readline";
import { Run, RunEvent, StartEvent, CreateRunOptions, ListRunsResponse, MessageEvent } from "@x/shared/dist/runs.js";
import { Run, RunEvent, StartEvent, ListRunsResponse, MessageEvent, UseCase } from "@x/shared/dist/runs.js";
import { getDefaultModelAndProvider } from "../models/defaults.js";
/**
@@ -24,7 +24,13 @@ const LegacyStartEvent = StartEvent.extend({
});
const ReadRunEvent = RunEvent.or(LegacyStartEvent);
export type CreateRunRepoOptions = Required<z.infer<typeof CreateRunOptions>>;
export type CreateRunRepoOptions = {
agentId: string;
model: string;
provider: string;
useCase: z.infer<typeof UseCase>;
subUseCase?: string;
};
export interface IRunsRepo {
create(options: CreateRunRepoOptions): Promise<z.infer<typeof Run>>;
@@ -187,6 +193,8 @@ export class FSRunsRepo implements IRunsRepo {
agentName: options.agentId,
model: options.model,
provider: options.provider,
useCase: options.useCase,
...(options.subUseCase ? { subUseCase: options.subUseCase } : {}),
subflow: [],
ts,
};
@@ -197,6 +205,8 @@ export class FSRunsRepo implements IRunsRepo {
agentId: options.agentId,
model: options.model,
provider: options.provider,
useCase: options.useCase,
...(options.subUseCase ? { subUseCase: options.subUseCase } : {}),
log: [start],
};
}
@@ -230,6 +240,8 @@
agentId: start.agentName,
model: start.model,
provider: start.provider,
...(start.useCase ? { useCase: start.useCase } : {}),
...(start.subUseCase ? { subUseCase: start.subUseCase } : {}),
log: events,
};
}
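
Putting the repo changes together, a hedged sketch of the start event that `FSRunsRepo.create()` now persists as the first entry of a run's log. Field names come from the diff above; the event discriminator, the concrete values, and the timestamp format are assumptions:

```ts
// Illustrative shape only; StartEvent's full schema lives in @x/shared.
const start = {
  type: 'start',                  // assumed discriminator
  agentName: 'inline_task_agent', // example values from call sites above
  model: 'gpt-4.1',
  provider: 'openai',
  useCase: 'knowledge_sync',
  subUseCase: 'inline_task_run',  // omitted entirely when not provided
  subflow: [],
  ts: Date.now(),                 // timestamp format is an assumption
};
```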

View file

@@ -23,8 +23,15 @@ export async function createRun(opts: z.infer<typeof CreateRunOptions>): Promise
const defaults = await getDefaultModelAndProvider();
const model = opts.model ?? agent.model ?? defaults.model;
const provider = opts.provider ?? agent.provider ?? defaults.provider;
const useCase = opts.useCase ?? "copilot_chat";
const run = await repo.create({ agentId: opts.agentId, model, provider });
const run = await repo.create({
agentId: opts.agentId,
model,
provider,
useCase,
...(opts.subUseCase ? { subUseCase: opts.subUseCase } : {}),
});
await bus.publish(run.log[0]);
return run;
}