raiph_ai
an hour ago
This project looks super cool. I love the idea of having OpenClaw on a low-powered device. I am working on something and should have it out next week. It was designed to run on a Raspberry Pi and would be a great companion to your project. I will post back when it is live and would love for you to take a look.
pycoclaw
28 minutes ago
Hope it's a cool local LLM.
Also, I will soon have this running on the RP2350, so stay tuned for that!
picoClaw has a built-in provider router so that heartbeats etc. can go local to save tokens and $$$
e.g.
"providers": {
"default": {
"api_key": "",
"base_url": "https://api.openai.com/v1",
"model": "gpt-4o-mini",
"type": "chat",
"cost": { "input": 0.15, "output": 0.60 }
},
"gemini": {
"api_key": "",
"base_url": "https://generativelanguage.googleapis.com/v1beta/openai",
"model": "gemini-3.1-pro-preview",
"type": "chat",
"cost": { "input": 0.15, "output": 0.60 }
},
"fast": {
"api_key": "",
"base_url": "https://generativelanguage.googleapis.com/v1beta/openai",
"model": "gemini-2.0-flash-lite",
"type": "chat",
"cost": { "input": 0.075, "output": 0.30 }
},
"local": {
"base_url": "http://192.168.1.238:1337/v1",
"model": "gemma-3n-e4b-it-lm-4bit",
"type": "chat",
"tools": false,
"context_window": 4096,
"cost": { "input": 0, "output": 0 }
},
"image": {
"api_key": "",
"base_url": "https://generativelanguage.googleapis.com/v1beta",
"model": "gemini-3-pro-image-preview",
"type": "image"
}
},
"routing": {
"heartbeat": "default",
"cron": "default",
"chat": "default",
"subagent": "default",
"monthly_budget_usd": 5.0,
"warn_at_pct": 80,
"throttle_at_pct": 95,
"throttle_to": "default"
},
"channels": {
"telegram": {
"enabled": false,
"bot_token": "",
"default_chat_id": ""
}
},
"memory": {
"vector_weight": 0.7,
"text_weight": 0.3,
"min_score": 0.35,
"max_result_chars": 2000,
"max_results": 5,
"chunk_size": 400,
"chunk_overlap": 80,
"reindex_on_boot": true,
"embedding_cache": true,
"embedding_provider": "openai",
"embed_async": true
},
"compaction": {
"context_window": 128000,
"reserve_tokens": 4000,
"history_share": 0.6
},
"memory_flush": {
"enabled": true,
"soft_threshold_tokens": 4000,
"force_flush_transcript_bytes": 65536,
"prompt": "",
"system_prompt": ""
},
"temperature": {
"heartbeat": 0.9,
"reflection": 1
},
etc