Quick Start
By the end of this guide, you'll have a running Dakera server with persistent agent memory. You'll store memories, recall them with natural language queries, and see how semantic search finds relevant context even when the wording doesn't match.
Prerequisites
- Docker installed and running
- curl or a REST client (for testing without an SDK)
- Python 3.8+ / Node 18+ / Go 1.21+ / Rust 1.70+ (depending on your SDK choice)
- ~5 minutes to complete
Run the server
Pull and start the official Docker image from GitHub Container Registry:
docker run -d \
--name dakera \
--restart unless-stopped \
-p 3300:3300 \
-e DAKERA_PORT=3300 \
-e DAKERA_ROOT_API_KEY=my-dev-key \
-e DAKERA_STORAGE=filesystem \
-e DAKERA_STORAGE_PATH=/data \
-v dakera-data:/data \
ghcr.io/dakera-ai/dakera:latest
Verify the server is running:
curl http://localhost:3300/health
Expected response:
{
"status": "healthy",
"version": "0.11.55",
"uptime_secs": 3
}
Use your server's public IP instead of localhost if running remotely. See Deployment for Compose, K8s, and Helm.
Troubleshooting
Port 3300 already in use — Change the host port: -p 3301:3300 and use :3301 in subsequent commands.
Cannot connect to Docker daemon — Start Docker Desktop (macOS/Windows) or run sudo systemctl start docker (Linux).
Connection refused on health check — The server needs a few seconds to start. Wait 5 seconds and retry. Check container logs with docker logs dakera.
Install an SDK
pip install dakera
npm install dakera
# yarn add dakera | pnpm add dakera
go get github.com/dakera-ai/dakera-go
# Cargo.toml
[dependencies]
dakera-client = "0.11"
cargo install dakera-cli
dk init # interactive setup wizard
Store memories
Store three memories to see how semantic search works. Set your server URL and API key, then store each one:
# Memory 1: UI preference
curl -X POST http://localhost:3300/v1/memory/store \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{
"agent_id": "my-agent",
"content": "User prefers dark mode and minimal UI animations",
"importance": 0.8,
"tags": ["preference", "ui"]
}'
# Memory 2: Team process
curl -X POST http://localhost:3300/v1/memory/store \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{
"agent_id": "my-agent",
"content": "Team standup is every morning at 9:30 AM in the #daily channel",
"importance": 0.7,
"tags": ["team", "process"]
}'
# Memory 3: Infrastructure
curl -X POST http://localhost:3300/v1/memory/store \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{
"agent_id": "my-agent",
"content": "The deployment pipeline uses GitHub Actions with a staging environment on port 8080",
"importance": 0.9,
"tags": ["infra", "deployment"]
}'
import os
from dakera import DakeraClient
# export DAKERA_URL=http://<YOUR_SERVER_IP>:3300
# export DAKERA_API_KEY=my-dev-key
client = DakeraClient(
base_url=os.getenv("DAKERA_URL", "http://localhost:3300"),
api_key=os.getenv("DAKERA_API_KEY")
)
# Memory 1: UI preference
client.memories.store(
agent_id="my-agent",
content="User prefers dark mode and minimal UI animations",
importance=0.8,
tags=["preference", "ui"]
)
# Memory 2: Team process
client.memories.store(
agent_id="my-agent",
content="Team standup is every morning at 9:30 AM in the #daily channel",
importance=0.7,
tags=["team", "process"]
)
# Memory 3: Infrastructure
client.memories.store(
agent_id="my-agent",
content="The deployment pipeline uses GitHub Actions with a staging environment on port 8080",
importance=0.9,
tags=["infra", "deployment"]
)
import { DakeraClient } from 'dakera';
const client = new DakeraClient({
baseUrl: process.env.DAKERA_URL ?? 'http://localhost:3300',
apiKey: process.env.DAKERA_API_KEY,
});
// Memory 1: UI preference
await client.memories.store({
agentId: 'my-agent',
content: 'User prefers dark mode and minimal UI animations',
importance: 0.8,
tags: ['preference', 'ui'],
});
// Memory 2: Team process
await client.memories.store({
agentId: 'my-agent',
content: 'Team standup is every morning at 9:30 AM in the #daily channel',
importance: 0.7,
tags: ['team', 'process'],
});
// Memory 3: Infrastructure
await client.memories.store({
agentId: 'my-agent',
content: 'The deployment pipeline uses GitHub Actions with a staging environment on port 8080',
importance: 0.9,
tags: ['infra', 'deployment'],
});
import (
dakera "github.com/dakera-ai/dakera-go"
"os"
)
serverURL := os.Getenv("DAKERA_URL")
if serverURL == "" { serverURL = "http://localhost:3300" }
client := dakera.NewClientWithOptions(dakera.ClientOptions{
BaseURL: serverURL,
APIKey: os.Getenv("DAKERA_API_KEY"),
})
// Memory 1: UI preference
client.Memories.Store(ctx, &dakera.StoreRequest{
AgentID: "my-agent",
Content: "User prefers dark mode and minimal UI animations",
Importance: 0.8,
Tags: []string{"preference", "ui"},
})
// Memory 2: Team process
client.Memories.Store(ctx, &dakera.StoreRequest{
AgentID: "my-agent",
Content: "Team standup is every morning at 9:30 AM in the #daily channel",
Importance: 0.7,
Tags: []string{"team", "process"},
})
// Memory 3: Infrastructure
client.Memories.Store(ctx, &dakera.StoreRequest{
AgentID: "my-agent",
Content: "The deployment pipeline uses GitHub Actions with a staging environment on port 8080",
Importance: 0.9,
Tags: []string{"infra", "deployment"},
})
use dakera_client::{DakeraClient, Config, memory::{StoreRequest, RecallRequest}};
let client = DakeraClient::new(Config {
base_url: std::env::var("DAKERA_URL")
.unwrap_or_else(|_| "http://localhost:3300".into()),
api_key: std::env::var("DAKERA_API_KEY").ok(),
..Default::default()
})?;
// Memory 1: UI preference
client.memories().store(StoreRequest {
agent_id: "my-agent".into(),
content: "User prefers dark mode and minimal UI animations".into(),
importance: Some(0.8),
tags: vec!["preference".into(), "ui".into()],
..Default::default()
}).await?;
// Memory 2: Team process
client.memories().store(StoreRequest {
agent_id: "my-agent".into(),
content: "Team standup is every morning at 9:30 AM in the #daily channel".into(),
importance: Some(0.7),
tags: vec!["team".into(), "process".into()],
..Default::default()
}).await?;
// Memory 3: Infrastructure
client.memories().store(StoreRequest {
agent_id: "my-agent".into(),
content: "The deployment pipeline uses GitHub Actions with a staging environment on port 8080".into(),
importance: Some(0.9),
tags: vec!["infra".into(), "deployment".into()],
..Default::default()
}).await?;
Each store call returns a response confirming the memory was saved:
{
"id": "mem_18a3b7c92d4f1e06",
"agent_id": "my-agent",
"content": "User prefers dark mode and minimal UI animations",
"importance": 0.8,
"created_at": 1778827500,
"tags": ["preference", "ui"],
"memory_type": "episodic"
}
Recall with semantic search
Now ask a question that doesn't match the exact wording of any memory. Dakera's semantic search finds the right one anyway:
curl -X POST http://localhost:3300/v1/memory/recall \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{"agent_id": "my-agent", "query": "how does the team deploy code?"}'
response = client.memories.recall(
agent_id="my-agent",
query="how does the team deploy code?"
)
for m in response.memories:
print(f"{m.content} (score: {m.score:.2f})")
const response = await client.memories.recall({
agentId: 'my-agent',
query: 'how does the team deploy code?',
});
response.memories.forEach(m =>
console.log(`${m.content} (score: ${m.score.toFixed(2)})`)
);
response, err := client.Memories.Recall(ctx, &dakera.RecallRequest{
AgentID: "my-agent",
Query: "how does the team deploy code?",
})
for _, m := range response.Memories {
fmt.Printf("%s (score: %.2f)\n", m.Content, m.Score)
}
let response = client.memories().recall(RecallRequest {
agent_id: "my-agent".into(),
query: "how does the team deploy code?".into(),
..Default::default()
}).await?;
for m in &response.memories {
println!("{} (score: {:.2})", m.content, m.score);
}
Expected response — notice Memory 3 ranks highest even though your query said "deploy code" while the memory says "deployment pipeline":
{
"memories": [
{
"id": "mem_18a3b7ca1e5f2a09",
"content": "The deployment pipeline uses GitHub Actions with a staging environment on port 8080",
"importance": 0.9,
"score": 0.92,
"tags": ["infra", "deployment"],
"created_at": 1778827502
},
{
"id": "mem_18a3b7c9a8b14d07",
"content": "Team standup is every morning at 9:30 AM in the #daily channel",
"importance": 0.7,
"score": 0.54,
"tags": ["team", "process"],
"created_at": 1778827501
}
],
"total": 2
}
Group memories with sessions
Sessions let you group related memories together — perfect for conversations, task workflows, or debugging sessions:
# Start a session
curl -X POST http://localhost:3300/v1/sessions/start \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{"agent_id": "my-agent", "metadata": {"task": "onboarding"}}'
# → {"session": {"id": "sess_18a3b8e01a2c3d04", ...}}
# Store a memory within the session
curl -X POST http://localhost:3300/v1/memory/store \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{
"agent_id": "my-agent",
"session_id": "sess_18a3b8e01a2c3d04",
"content": "User completed the quickstart guide and stored 3 test memories",
"importance": 0.6
}'
# End the session with a summary
curl -X POST http://localhost:3300/v1/sessions/sess_18a3b8e01a2c3d04/end \
-H "Authorization: Bearer my-dev-key" \
-H "Content-Type: application/json" \
-d '{"summary": "Completed user onboarding — stored preferences, team info, and infra context"}'
# Start a session
session = client.sessions.start(
agent_id="my-agent",
metadata={"task": "onboarding"}
)
# Store a memory within the session
client.memories.store(
agent_id="my-agent",
session_id=session.id,
content="User completed the quickstart guide and stored 3 test memories",
importance=0.6
)
# End the session with a summary
client.sessions.end(
session_id=session.id,
summary="Completed user onboarding — stored preferences, team info, and infra context"
)
# Start a session
const session = await client.sessions.start({
agentId: 'my-agent',
metadata: { task: 'onboarding' },
});
// Store a memory within the session
await client.memories.store({
agentId: 'my-agent',
sessionId: session.id,
content: 'User completed the quickstart guide and stored 3 test memories',
importance: 0.6,
});
// End the session with a summary
await client.sessions.end({
sessionId: session.id,
summary: 'Completed user onboarding — stored preferences, team info, and infra context',
});
// Start a session
session, err := client.Sessions.Start(ctx, &dakera.SessionStartRequest{
AgentID: "my-agent",
Metadata: map[string]interface{}{"task": "onboarding"},
})
// Store a memory within the session
client.Memories.Store(ctx, &dakera.StoreRequest{
AgentID: "my-agent",
SessionID: session.ID,
Content: "User completed the quickstart guide and stored 3 test memories",
Importance: 0.6,
})
// End the session with a summary
client.Sessions.End(ctx, session.ID, "Completed user onboarding — stored preferences, team info, and infra context")
// Start a session
let session = client.sessions().start(StartRequest {
agent_id: "my-agent".into(),
metadata: Some(serde_json::json!({"task": "onboarding"})),
}).await?;
// Store a memory within the session
client.memories().store(StoreRequest {
agent_id: "my-agent".into(),
session_id: Some(session.id.clone()),
content: "User completed the quickstart guide and stored 3 test memories".into(),
importance: Some(0.6),
..Default::default()
}).await?;
// End the session with a summary
client.sessions().end(
&session.id,
Some("Completed user onboarding — stored preferences, team info, and infra context"),
).await?;
Sessions provide: auto-generated summaries when ended, metadata for filtering, and scoped recall — pass session_id in a recall request to search only within that session's memories.