A complete guide to integrating the TOON format with agent frameworks.
from langgraph.graph import StateGraph
# EventEmitter was missing from this import in the original example even
# though it is instantiated below.
from toon_backend import create_server, EventEmitter
from toon_backend.event_emitter import EventType, AgentEvent
import time  # needed for event ids / timestamps below

# Create your LangGraph workflow
workflow = StateGraph(...)

# Create event emitter
event_emitter = EventEmitter()


# Hook into LangGraph callbacks: forward each state change to the TOON
# event emitter so connected clients receive a STATE_UPDATE event.
def on_state_change(state):
    """Emit a STATE_UPDATE event for a LangGraph state change.

    Args:
        state: The current LangGraph state object (schema is defined by
            the workflow; it is forwarded as-is).
    """
    event_emitter.emit(AgentEvent(
        event_type=EventType.STATE_UPDATE,
        # Wall-clock-derived id; two events in the same float tick would
        # collide — consider uuid4 for guaranteed uniqueness.
        event_id=f"state_{time.time()}",
        timestamp=int(time.time() * 1000),  # milliseconds since epoch
        state=state,
    ))
from crewai import Crew, Agent, Task
from toon_backend import create_server, EventEmitter
# EventType and AgentEvent were used below but never imported in the
# original example.
from toon_backend.event_emitter import EventType, AgentEvent
import time  # needed for timestamps below

# Create your CrewAI crew
crew = Crew(
    agents=[...],
    tasks=[...],
)

# Create event emitter
event_emitter = EventEmitter()


# Hook into CrewAI callbacks: announce the start of each task as an
# ACTION_START event so the TOON frontend can render progress.
def on_task_start(task):
    """Emit an ACTION_START event when a CrewAI task begins.

    Args:
        task: The CrewAI task that is starting; its ``id`` and
            ``description`` are forwarded in the event.
    """
    event_emitter.emit(AgentEvent(
        event_type=EventType.ACTION_START,
        event_id=f"task_{task.id}",
        timestamp=int(time.time() * 1000),  # milliseconds since epoch
        actionName=task.description,
        endpoint="/tasks",
        method="POST",
    ))
import { useCopilotAction } from '@copilotkit/react-core';
import { encodeToon } from '@programsmagic/toon-format';

// Register a CopilotKit action whose handler converts the incoming payload
// to TOON before it reaches the LLM.
useCopilotAction({
  name: 'processData',
  description: 'Process data in TOON format',
  handler: async ({ data }) => {
    // `minimize` yields the most token-efficient TOON encoding.
    const toon = encodeToon(data, { minimize: true });
    // Return the encoded string so the LLM receives the compact form
    // (the original example computed it but never used it).
    return toon;
  },
});
import { FormatViewer, StreamViewer } from '@programsmagic/toon-frontend';
import '@programsmagic/toon-viewer/styles';
function App() {
return (
<>
<FormatViewer
initialContent={data}
showTokenCounter={true}
llmMode={true}
/>
<StreamViewer
url="http://localhost:3000/stream/toon?file=data.json"
protocol="sse"
/>
</>
);
}
import { createServer } from '@programsmagic/toon-backend-node';

/*
 * Boot a local TOON backend on port 3000 with CORS enabled and a JSON
 * schema loaded from disk. Once started, the server exposes:
 *
 *   GET  /stream/toon?file=data.json&format=json&model=gpt-4
 *        — TOON streaming endpoint
 *   POST /convert   body: { content: "...", from: "json", to: "toon" }
 *        — format conversion endpoint
 *   POST /optimize  body: { content: "...", model: "gpt-4" }
 *        — optimization endpoint
 *   POST /tokens    body: { content: "...", model: "gpt-4" }
 *        — token counting endpoint
 */
const server = await createServer({
  port: 3000,
  schemaSource: './schema.json',
  cors: true,
});

await server.start();
License: MIT