feat: remove rss, status, and badge features

This commit is contained in:
Jet 2026-03-19 01:33:56 -07:00
parent 553d7d1780
commit 36720e2ba5
No known key found for this signature in database
21 changed files with 904 additions and 1200 deletions

16
Cargo.lock generated
View file

@ -838,22 +838,6 @@ dependencies = [
"tracing-subscriber",
]
[[package]]
name = "noisebell-rss"
version = "0.1.0"
dependencies = [
"anyhow",
"axum",
"noisebell-common",
"reqwest",
"serde",
"time",
"tokio",
"tower-http",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "nu-ansi-term"
version = "0.50.3"

View file

@ -4,6 +4,5 @@ members = [
"remote/noisebell-common",
"remote/cache-service",
"remote/discord-bot",
"remote/rss-service",
]
resolver = "2"

View file

@ -2,12 +2,12 @@
Monitors the door at [Noisebridge](https://www.noisebridge.net) and tells you whether it's open or closed.
A Raspberry Pi reads a magnetic sensor on the door and pushes state changes to a cache server. The cache fans updates out to Discord and an Atom feed.
A Raspberry Pi reads a magnetic sensor on the door and pushes state changes to a cache server. The cache keeps the latest state and fans updates out to Discord.
```
Pi (door sensor) ──webhook──> Cache ──webhook──> Discord
|
polls Pi <-+ RSS reads from Cache
polls Pi <-+
```
## Layout
@ -15,6 +15,6 @@ Pi (door sensor) ──webhook──> Cache ──webhook──> Discord
| Directory | What it is |
|-----------|------------|
| [`pi/`](pi/) | NixOS config + Rust service for the Pi |
| [`remote/`](remote/) | Server-side services (cache, discord bot, rss feed) |
| [`remote/`](remote/) | Server-side services (cache and Discord bot) |
Each directory has its own README with setup and configuration details.

View file

@ -14,7 +14,14 @@
};
};
outputs = { self, nixpkgs, agenix, crane, rust-overlay }:
outputs =
{
self,
nixpkgs,
agenix,
crane,
rust-overlay,
}:
let
system = "x86_64-linux";
pkgs = import nixpkgs {
@ -22,15 +29,13 @@
overlays = [ rust-overlay.overlays.default ];
};
# --- Remote services (x86_64-linux) ---
rustToolchain = pkgs.rust-bin.stable.latest.default;
craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain;
src = pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
(builtins.match ".*\.png$" path != null) || (craneLib.filterCargoSources path type);
filter =
path: type: (builtins.match ".*\.png$" path != null) || (craneLib.filterCargoSources path type);
};
remoteArgs = {
@ -43,16 +48,18 @@
remoteArtifacts = craneLib.buildDepsOnly remoteArgs;
buildRemoteMember = name: craneLib.buildPackage (remoteArgs // {
buildRemoteMember =
name:
craneLib.buildPackage (
remoteArgs
// {
cargoArtifacts = remoteArtifacts;
cargoExtraArgs = "-p ${name}";
});
}
);
noisebell-cache = buildRemoteMember "noisebell-cache";
noisebell-discord = buildRemoteMember "noisebell-discord";
noisebell-rss = buildRemoteMember "noisebell-rss";
# --- Pi service (cross-compiled to aarch64-linux) ---
crossPkgs = import nixpkgs {
inherit system;
@ -74,8 +81,7 @@
doCheck = false;
CARGO_BUILD_TARGET = "aarch64-unknown-linux-gnu";
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER =
"${crossPkgs.stdenv.cc.targetPrefix}cc";
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER = "${crossPkgs.stdenv.cc.targetPrefix}cc";
TARGET_CC = "${crossPkgs.stdenv.cc.targetPrefix}cc";
CC_aarch64_unknown_linux_gnu = "${crossPkgs.stdenv.cc.targetPrefix}cc";
HOST_CC = "${pkgs.stdenv.cc.nativePrefix}cc";
@ -86,14 +92,16 @@
piArtifacts = piCraneLib.buildDepsOnly piArgs;
noisebell-pi = piCraneLib.buildPackage (piArgs // {
noisebell-pi = piCraneLib.buildPackage (
piArgs
// {
cargoArtifacts = piArtifacts;
});
}
);
in
{
packages.${system} = {
inherit noisebell-cache noisebell-discord noisebell-rss;
inherit noisebell-cache noisebell-discord;
default = noisebell-cache;
};
@ -105,12 +113,12 @@
nixosModules = {
cache = import ./remote/cache-service/module.nix noisebell-cache;
discord = import ./remote/discord-bot/module.nix noisebell-discord;
rss = import ./remote/rss-service/module.nix noisebell-rss;
default = { imports = [
default = {
imports = [
(import ./remote/cache-service/module.nix noisebell-cache)
(import ./remote/discord-bot/module.nix noisebell-discord)
(import ./remote/rss-service/module.nix noisebell-rss)
]; };
];
};
};
nixosConfigurations.pi = nixpkgs.lib.nixosSystem {

View file

@ -113,27 +113,3 @@ All endpoints require `Authorization: Bearer <token>`.
```json
{"status": "open", "timestamp": 1710000000}
```
**`GET /info`** — system health + GPIO config
```json
{
"uptime_secs": 3600,
"started_at": 1710000000,
"cpu_temp_celsius": 42.3,
"memory_available_kb": 350000,
"memory_total_kb": 512000,
"disk_total_bytes": 16000000000,
"disk_available_bytes": 12000000000,
"load_average": [0.01, 0.05, 0.10],
"nixos_version": "24.11.20240308.9dcb002",
"commit": "c6e726c",
"gpio": {
"pin": 17,
"active_low": true,
"pull": "up",
"open_level": "low",
"current_raw_level": "low"
}
}
```

View file

@ -1,4 +1,4 @@
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::atomic::{AtomicU8, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
@ -7,49 +7,54 @@ use axum::extract::State;
use axum::http::{HeaderMap, StatusCode};
use axum::routing::get;
use axum::{Json, Router};
use noisebell_common::{validate_bearer, WebhookPayload};
use serde::Serialize;
use noisebell_common::{
validate_bearer, DoorStatus, PiStatusResponse, SignalLevel, WebhookPayload,
};
use tokio_gpiod::{Bias, Chip, Edge, EdgeDetect, Options};
use tracing::{error, info, warn};
/// Door state tracked on the Pi, packed into an `AtomicU8` (0 = closed, 1 = open).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
enum LocalDoorState {
    Closed = 0,
    Open = 1,
}

impl LocalDoorState {
    /// A raw GPIO level means "open" exactly when it matches the configured
    /// active level; any other level means "closed".
    fn from_raw_level(raw_level: SignalLevel, active_level: SignalLevel) -> Self {
        if raw_level != active_level {
            Self::Closed
        } else {
            Self::Open
        }
    }

    /// Decode a value previously stored with `as u8`; anything other than 1
    /// is treated as closed.
    fn from_atomic(value: u8) -> Self {
        if value == 1 {
            Self::Open
        } else {
            Self::Closed
        }
    }

    /// Widen into the shared wire-format status type.
    const fn as_door_status(self) -> DoorStatus {
        if matches!(self, Self::Open) {
            DoorStatus::Open
        } else {
            DoorStatus::Closed
        }
    }
}
struct AppState {
is_open: AtomicBool,
door_state: AtomicU8,
last_changed: AtomicU64,
started_at: u64,
gpio_pin: u8,
active_low: bool,
commit: String,
inbound_api_key: String,
}
#[derive(Serialize)]
struct StatusResponse {
status: &'static str,
timestamp: u64,
impl AppState {
fn current_door_state(&self) -> LocalDoorState {
LocalDoorState::from_atomic(self.door_state.load(Ordering::Relaxed))
}
#[derive(Serialize)]
struct GpioInfo {
pin: u8,
active_low: bool,
pull: &'static str,
open_level: &'static str,
current_raw_level: &'static str,
}
#[derive(Serialize)]
struct InfoResponse {
uptime_secs: u64,
started_at: u64,
cpu_temp_celsius: Option<f64>,
memory_available_kb: Option<u64>,
memory_total_kb: Option<u64>,
disk_total_bytes: Option<u64>,
disk_available_bytes: Option<u64>,
load_average: Option<[f64; 3]>,
nixos_version: Option<String>,
commit: String,
gpio: GpioInfo,
}
fn unix_timestamp() -> u64 {
@ -59,112 +64,19 @@ fn unix_timestamp() -> u64 {
.as_secs()
}
fn status_str(is_open: bool) -> &'static str {
if is_open {
"open"
} else {
"closed"
}
}
fn read_cpu_temp() -> Option<f64> {
std::fs::read_to_string("/sys/class/thermal/thermal_zone0/temp")
.ok()
.and_then(|s| s.trim().parse::<f64>().ok())
.map(|m| m / 1000.0)
}
fn read_meminfo_field(contents: &str, field: &str) -> Option<u64> {
contents
.lines()
.find(|l| l.starts_with(field))
.and_then(|l| l.split_whitespace().nth(1))
.and_then(|v| v.parse().ok())
}
fn read_disk_usage() -> Option<(u64, u64)> {
let path = std::ffi::CString::new("/").ok()?;
let mut stat: libc::statvfs = unsafe { std::mem::zeroed() };
let ret = unsafe { libc::statvfs(path.as_ptr(), &mut stat) };
if ret != 0 {
return None;
}
let block_size = stat.f_frsize as u64;
Some((
stat.f_blocks * block_size,
stat.f_bavail * block_size,
))
}
fn read_load_average() -> Option<[f64; 3]> {
let contents = std::fs::read_to_string("/proc/loadavg").ok()?;
let mut parts = contents.split_whitespace();
Some([
parts.next()?.parse().ok()?,
parts.next()?.parse().ok()?,
parts.next()?.parse().ok()?,
])
}
fn read_nixos_version() -> Option<String> {
std::fs::read_to_string("/run/current-system/nixos-version")
.ok()
.map(|s| s.trim().to_string())
}
async fn get_status(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
) -> Result<Json<StatusResponse>, StatusCode> {
) -> Result<Json<PiStatusResponse>, StatusCode> {
if !validate_bearer(&headers, &state.inbound_api_key) {
return Err(StatusCode::UNAUTHORIZED);
}
Ok(Json(StatusResponse {
status: status_str(state.is_open.load(Ordering::Relaxed)),
Ok(Json(PiStatusResponse {
status: state.current_door_state().as_door_status(),
timestamp: state.last_changed.load(Ordering::Relaxed),
}))
}
async fn get_info(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
) -> Result<Json<InfoResponse>, StatusCode> {
if !validate_bearer(&headers, &state.inbound_api_key) {
return Err(StatusCode::UNAUTHORIZED);
}
let meminfo = std::fs::read_to_string("/proc/meminfo").ok();
let disk = read_disk_usage();
let is_open = state.is_open.load(Ordering::Relaxed);
let raw_level = match (state.active_low, is_open) {
(true, true) | (false, false) => "low",
_ => "high",
};
Ok(Json(InfoResponse {
uptime_secs: unix_timestamp() - state.started_at,
started_at: state.started_at,
cpu_temp_celsius: read_cpu_temp(),
memory_available_kb: meminfo
.as_deref()
.and_then(|m| read_meminfo_field(m, "MemAvailable:")),
memory_total_kb: meminfo
.as_deref()
.and_then(|m| read_meminfo_field(m, "MemTotal:")),
disk_total_bytes: disk.map(|(total, _)| total),
disk_available_bytes: disk.map(|(_, avail)| avail),
load_average: read_load_average(),
nixos_version: read_nixos_version(),
commit: state.commit.clone(),
gpio: GpioInfo {
pin: state.gpio_pin,
active_low: state.active_low,
pull: if state.active_low { "up" } else { "down" },
open_level: if state.active_low { "low" } else { "high" },
current_raw_level: raw_level,
},
}))
}
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt()
@ -189,8 +101,7 @@ async fn main() -> Result<()> {
let endpoint_url =
std::env::var("NOISEBELL_ENDPOINT_URL").context("NOISEBELL_ENDPOINT_URL is required")?;
let api_key =
std::env::var("NOISEBELL_API_KEY").context("NOISEBELL_API_KEY is required")?;
let api_key = std::env::var("NOISEBELL_API_KEY").context("NOISEBELL_API_KEY is required")?;
let retry_attempts: u32 = std::env::var("NOISEBELL_RETRY_ATTEMPTS")
.unwrap_or_else(|_| "3".into())
@ -207,13 +118,17 @@ async fn main() -> Result<()> {
.parse()
.context("NOISEBELL_HTTP_TIMEOUT_SECS must be a valid u64")?;
let bind_address =
std::env::var("NOISEBELL_BIND_ADDRESS").unwrap_or_else(|_| "0.0.0.0".into());
let bind_address = std::env::var("NOISEBELL_BIND_ADDRESS").unwrap_or_else(|_| "0.0.0.0".into());
let active_low: bool = std::env::var("NOISEBELL_ACTIVE_LOW")
.unwrap_or_else(|_| "true".into())
.parse()
.context("NOISEBELL_ACTIVE_LOW must be true or false")?;
let active_level = if active_low {
SignalLevel::Low
} else {
SignalLevel::High
};
let inbound_api_key = std::env::var("NOISEBELL_INBOUND_API_KEY")
.context("NOISEBELL_INBOUND_API_KEY is required")?;
@ -224,7 +139,11 @@ async fn main() -> Result<()> {
.await
.context("failed to open gpiochip0")?;
let bias = if active_low { Bias::PullUp } else { Bias::PullDown };
let bias = if active_level == SignalLevel::Low {
Bias::PullUp
} else {
Bias::PullDown
};
// Request the line with edge detection for monitoring
let opts = Options::input([gpio_pin])
@ -243,29 +162,30 @@ async fn main() -> Result<()> {
.context("failed to read initial GPIO value")?;
// Value is true when line is active. With Active::High (default),
// true means the physical level is high.
let initial_high = initial_values[0];
let initial_open = if active_low { !initial_high } else { initial_high };
let initial_raw_level = if initial_values[0] {
SignalLevel::High
} else {
SignalLevel::Low
};
let initial_state = LocalDoorState::from_raw_level(initial_raw_level, active_level);
let now = unix_timestamp();
let commit =
std::env::var("NOISEBELL_COMMIT").unwrap_or_else(|_| "unknown".into());
let state = Arc::new(AppState {
is_open: AtomicBool::new(initial_open),
door_state: AtomicU8::new(initial_state as u8),
last_changed: AtomicU64::new(now),
started_at: now,
gpio_pin: gpio_pin as u8,
active_low,
commit,
inbound_api_key,
});
info!(initial_status = status_str(initial_open), "GPIO initialized");
info!(
initial_status = %initial_state.as_door_status(),
"GPIO initialized"
);
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<(bool, u64)>();
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<(DoorStatus, u64)>();
// Sync initial state with the cache on startup
let _ = tx.send((initial_open, now));
let _ = tx.send((initial_state.as_door_status(), now));
// Spawn async edge detection task
let state_for_edges = state.clone();
@ -290,18 +210,20 @@ async fn main() -> Result<()> {
}
last_event_time = std::time::Instant::now();
let new_open = match event.edge {
Edge::Falling => active_low,
Edge::Rising => !active_low,
let new_raw_level = match event.edge {
Edge::Falling => SignalLevel::Low,
Edge::Rising => SignalLevel::High,
};
let new_state = LocalDoorState::from_raw_level(new_raw_level, active_level);
let was_open = state_for_edges.is_open.swap(new_open, Ordering::Relaxed);
if was_open != new_open {
let previous_state =
LocalDoorState::from_atomic(state_for_edges.door_state.swap(new_state as u8, Ordering::Relaxed));
if previous_state != new_state {
let timestamp = unix_timestamp();
state_for_edges
.last_changed
.store(timestamp, Ordering::Relaxed);
let _ = edge_tx.send((new_open, timestamp));
let _ = edge_tx.send((new_state.as_door_status(), timestamp));
}
}
});
@ -313,11 +235,10 @@ async fn main() -> Result<()> {
.build()
.expect("failed to build HTTP client");
while let Some((new_open, timestamp)) = rx.recv().await {
let status = status_str(new_open);
info!(status, timestamp, "state changed");
while let Some((status, timestamp)) = rx.recv().await {
info!(status = %status, timestamp, "state changed");
let payload = WebhookPayload { status: status.to_string(), timestamp };
let payload = WebhookPayload { status, timestamp };
for attempt in 0..=retry_attempts {
let result = client
@ -336,9 +257,8 @@ async fn main() -> Result<()> {
if attempt == retry_attempts {
error!(error = %err_msg, "failed to notify endpoint after {} attempts", retry_attempts + 1);
} else {
let delay = Duration::from_secs(
retry_base_delay_secs * 2u64.pow(attempt),
);
let delay =
Duration::from_secs(retry_base_delay_secs * 2u64.pow(attempt));
warn!(error = %err_msg, attempt = attempt + 1, "notify failed, retrying in {:?}", delay);
tokio::time::sleep(delay).await;
}
@ -350,7 +270,6 @@ async fn main() -> Result<()> {
let app = Router::new()
.route("/", get(get_status))
.route("/info", get(get_info))
.with_state(state);
let listener = tokio::net::TcpListener::bind((&*bind_address, port))

View file

@ -4,9 +4,8 @@ Cargo workspace with the server-side pieces of Noisebell. Runs on any Linux box.
| Service | Port | What it does |
|---------|------|--------------|
| [`cache-service/`](cache-service/) | 3000 | Polls the Pi, stores history in SQLite, fans out webhooks |
| [`cache-service/`](cache-service/) | 3000 | Polls the Pi, stores the latest state in SQLite, fans out webhooks |
| [`discord-bot/`](discord-bot/) | 3001 | Posts door status to a Discord channel |
| [`rss-service/`](rss-service/) | 3002 | Serves an Atom feed of door events |
| [`noisebell-common/`](noisebell-common/) | — | Shared types and helpers |
See each service's README for configuration and API docs.
@ -22,7 +21,6 @@ Or with Nix:
```sh
nix build .#noisebell-cache
nix build .#noisebell-discord
nix build .#noisebell-rss
```
## NixOS deployment
@ -57,11 +55,6 @@ The flake exports NixOS modules. Each service runs as a hardened systemd unit be
channelId = "123456789012345678";
webhookSecretFile = "/run/secrets/noisebell-discord-webhook-secret";
};
services.noisebell-rss = {
enable = true;
domain = "rss.noisebell.example.com";
cacheUrl = "http://localhost:3000";
};
})
];
};

View file

@ -1,8 +1,8 @@
# Cache Service
The central hub. Sits between the Pi and everything else.
The central hub. Sits between the Pi and Discord.
It does two things: polls the Pi on a timer to keep a local copy of the door state, and receives push webhooks from the Pi when the state actually changes. Either way, updates get written to SQLite and forwarded to downstream services (Discord, etc.) via outbound webhooks.
It does two things: polls the Pi on a timer to keep a local copy of the door state, and receives push webhooks from the Pi when the state actually changes. Either way, updates get written to SQLite and forwarded to downstream services via outbound webhooks.
If the Pi stops responding to polls (configurable threshold, default 3 misses), the cache marks it as offline and notifies downstream.
@ -10,12 +10,12 @@ If the Pi stops responding to polls (configurable threshold, default 3 misses),
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/status` | — | Current door status (`status`, `timestamp`, `last_seen`) |
| `GET` | `/info` | — | Cached Pi system info |
| `GET` | `/history` | — | Last 100 state changes |
| `GET` | `/status` | — | Current door status (`status`, `since`, `last_checked`) |
| `POST` | `/webhook` | Bearer | Inbound webhook from the Pi |
| `GET` | `/health` | — | Health check |
`since` is the Pi-reported time when the current state began. `last_checked` is when the cache most recently attempted a poll.
## Configuration
NixOS options under `services.noisebell-cache`:
@ -28,7 +28,6 @@ NixOS options under `services.noisebell-cache`:
| `inboundApiKeyFile` | required | Key file for validating inbound webhooks |
| `port` | `3000` | Listen port |
| `statusPollIntervalSecs` | `60` | How often to poll `GET /` on the Pi |
| `infoPollIntervalSecs` | `300` | How often to poll `GET /info` on the Pi |
| `offlineThreshold` | `3` | Consecutive failed polls before marking offline |
| `retryAttempts` | `3` | Outbound webhook retry count |
| `retryBaseDelaySecs` | `1` | Exponential backoff base delay |

View file

@ -39,11 +39,6 @@ in
default = 60;
};
infoPollIntervalSecs = lib.mkOption {
type = lib.types.ints.positive;
default = 300;
};
offlineThreshold = lib.mkOption {
type = lib.types.ints.positive;
default = 3;
@ -70,7 +65,8 @@ in
};
outboundWebhooks = lib.mkOption {
type = lib.types.listOf (lib.types.submodule {
type = lib.types.listOf (
lib.types.submodule {
options = {
url = lib.mkOption { type = lib.types.str; };
secretFile = lib.mkOption {
@ -78,7 +74,8 @@ in
default = null;
};
};
});
}
);
default = [ ];
};
};
@ -95,15 +92,20 @@ in
reverse_proxy localhost:${toString cfg.port}
'';
systemd.services.noisebell-cache = let
webhookExports = lib.concatImapStringsSep "\n" (i: wh:
let idx = toString (i - 1); in
''export NOISEBELL_CACHE_WEBHOOK_${idx}_URL="${wh.url}"'' +
lib.optionalString (wh.secretFile != null)
systemd.services.noisebell-cache =
let
webhookExports = lib.concatImapStringsSep "\n" (
i: wh:
let
idx = toString (i - 1);
in
''export NOISEBELL_CACHE_WEBHOOK_${idx}_URL="${wh.url}"''
+ lib.optionalString (wh.secretFile != null) ''
export NOISEBELL_CACHE_WEBHOOK_${idx}_SECRET="$(cat ${wh.secretFile})"
''
export NOISEBELL_CACHE_WEBHOOK_${idx}_SECRET="$(cat ${wh.secretFile})"''
) cfg.outboundWebhooks;
in {
in
{
description = "Noisebell cache service";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
@ -113,7 +115,6 @@ in
NOISEBELL_CACHE_PI_ADDRESS = cfg.piAddress;
NOISEBELL_CACHE_DATA_DIR = cfg.dataDir;
NOISEBELL_CACHE_STATUS_POLL_INTERVAL_SECS = toString cfg.statusPollIntervalSecs;
NOISEBELL_CACHE_INFO_POLL_INTERVAL_SECS = toString cfg.infoPollIntervalSecs;
NOISEBELL_CACHE_OFFLINE_THRESHOLD = toString cfg.offlineThreshold;
NOISEBELL_CACHE_RETRY_ATTEMPTS = toString cfg.retryAttempts;
NOISEBELL_CACHE_RETRY_BASE_DELAY_SECS = toString cfg.retryBaseDelaySecs;

View file

@ -1,15 +1,16 @@
use std::sync::Arc;
use axum::extract::State;
use axum::http::{HeaderMap, StatusCode, header};
use axum::response::IntoResponse;
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::Json;
use noisebell_common::{validate_bearer, HistoryEntry, WebhookPayload};
use noisebell_common::{validate_bearer, CacheStatusResponse, DoorStatus, WebhookPayload};
use tokio::sync::Mutex;
use tracing::{error, info};
use crate::db;
use crate::types::{DoorStatus, WebhookTarget};
use crate::db::ApplyStateOutcome;
use crate::types::WebhookTarget;
use crate::webhook;
static OPEN_PNG: &[u8] = include_bytes!("../assets/open.png");
@ -46,12 +47,18 @@ pub async fn post_webhook(
return StatusCode::UNAUTHORIZED;
}
// Simple rate limiting: reset tokens every window, reject if exhausted
// Simple rate limiting: reset tokens every window, reject if exhausted.
let now = unix_now();
let last = state.webhook_last_request.load(std::sync::atomic::Ordering::Relaxed);
let last = state
.webhook_last_request
.load(std::sync::atomic::Ordering::Relaxed);
if now.saturating_sub(last) >= WEBHOOK_RATE_WINDOW_SECS {
state.webhook_tokens.store(WEBHOOK_RATE_LIMIT, std::sync::atomic::Ordering::Relaxed);
state.webhook_last_request.store(now, std::sync::atomic::Ordering::Relaxed);
state
.webhook_tokens
.store(WEBHOOK_RATE_LIMIT, std::sync::atomic::Ordering::Relaxed);
state
.webhook_last_request
.store(now, std::sync::atomic::Ordering::Relaxed);
}
let remaining = state.webhook_tokens.fetch_update(
std::sync::atomic::Ordering::Relaxed,
@ -62,43 +69,63 @@ pub async fn post_webhook(
return StatusCode::TOO_MANY_REQUESTS;
}
let Some(status) = DoorStatus::from_str(&body.status) else {
return StatusCode::BAD_REQUEST;
};
let now = unix_now();
let db = state.db.clone();
let status = body.status;
let timestamp = body.timestamp;
let result = tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
db::update_state(&conn, status, timestamp, now)
db::apply_state(&conn, status, timestamp, now)
})
.await
.expect("db task panicked");
if let Err(e) = result {
error!(error = %e, "failed to update state from webhook");
return StatusCode::INTERNAL_SERVER_ERROR;
}
info!(status = status.as_str(), timestamp = body.timestamp, "state updated via webhook");
match result {
Ok(ApplyStateOutcome::Applied) => {
info!(
status = %status,
timestamp = body.timestamp,
"state updated via webhook"
);
webhook::forward(
&state.client,
&state.webhooks,
&WebhookPayload {
status: status.as_str().to_string(),
status,
timestamp: body.timestamp,
},
state.retry_attempts,
state.retry_base_delay_secs,
)
.await;
}
Ok(ApplyStateOutcome::Duplicate) => {
info!(
status = %status,
timestamp = body.timestamp,
"duplicate webhook ignored"
);
}
Ok(ApplyStateOutcome::Stale) => {
info!(
status = %status,
timestamp = body.timestamp,
"stale webhook ignored"
);
}
Err(e) => {
error!(error = %e, "failed to update state from webhook");
return StatusCode::INTERNAL_SERVER_ERROR;
}
}
StatusCode::OK
}
pub async fn get_status(State(state): State<Arc<AppState>>) -> Result<Json<serde_json::Value>, StatusCode> {
pub async fn get_status(
State(state): State<Arc<AppState>>,
) -> Result<Json<CacheStatusResponse>, StatusCode> {
let db = state.db.clone();
let status = tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
@ -111,125 +138,70 @@ pub async fn get_status(State(state): State<Arc<AppState>>) -> Result<Json<serde
StatusCode::INTERNAL_SERVER_ERROR
})?;
Ok(Json(serde_json::to_value(status).unwrap()))
}
pub async fn get_info(State(state): State<Arc<AppState>>) -> Result<Json<serde_json::Value>, StatusCode> {
let db = state.db.clone();
let info = tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
db::get_pi_info(&conn)
})
.await
.expect("db task panicked")
.map_err(|e| {
error!(error = %e, "failed to get pi info");
StatusCode::INTERNAL_SERVER_ERROR
})?;
Ok(Json(info))
Ok(Json(status))
}
pub async fn health() -> StatusCode {
StatusCode::OK
}
pub async fn get_history(
State(state): State<Arc<AppState>>,
) -> Result<Json<Vec<HistoryEntry>>, StatusCode> {
let limit = 100u32;
let db = state.db.clone();
let entries = tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
db::get_history(&conn, limit)
})
.await
.expect("db task panicked")
.map_err(|e| {
error!(error = %e, "failed to get history");
StatusCode::INTERNAL_SERVER_ERROR
})?;
Ok(Json(entries))
}
/// Serve the "open" status PNG. The asset is compiled in and never changes,
/// so it gets a long client-side cache lifetime.
pub async fn get_image_open() -> impl IntoResponse {
    (
        [
            (header::CONTENT_TYPE, "image/png"),
            (header::CACHE_CONTROL, "public, max-age=86400"),
        ],
        OPEN_PNG,
    )
}
/// Serve the "closed" status PNG. The asset is compiled in and never changes,
/// so it gets a long client-side cache lifetime.
pub async fn get_image_closed() -> impl IntoResponse {
    (
        [
            (header::CONTENT_TYPE, "image/png"),
            (header::CACHE_CONTROL, "public, max-age=86400"),
        ],
        CLOSED_PNG,
    )
}
/// Serve the "offline" status PNG. The asset is compiled in and never changes,
/// so it gets a long client-side cache lifetime.
pub async fn get_image_offline() -> impl IntoResponse {
    (
        [
            (header::CONTENT_TYPE, "image/png"),
            (header::CACHE_CONTROL, "public, max-age=86400"),
        ],
        OFFLINE_PNG,
    )
}
pub async fn get_image(State(state): State<Arc<AppState>>) -> impl IntoResponse {
pub async fn get_image(State(state): State<Arc<AppState>>) -> Response {
let db = state.db.clone();
let status = tokio::task::spawn_blocking(move || {
let status = match tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
db::get_current_status(&conn)
})
.await
.expect("db task panicked")
.unwrap_or(DoorStatus::Offline);
{
Ok(status) => status,
Err(e) => {
error!(error = %e, "failed to get current status for image");
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
};
let image = match status {
DoorStatus::Open => OPEN_PNG,
DoorStatus::Closed => CLOSED_PNG,
DoorStatus::Offline => OFFLINE_PNG,
};
([(header::CONTENT_TYPE, "image/png"), (header::CACHE_CONTROL, "public, max-age=5")], image)
}
pub async fn get_badge(State(state): State<Arc<AppState>>) -> impl IntoResponse {
let db = state.db.clone();
let status = tokio::task::spawn_blocking(move || {
let conn = db.blocking_lock();
db::get_current_status(&conn)
})
.await
.expect("db task panicked")
.unwrap_or(DoorStatus::Offline);
let (label, color) = match status {
DoorStatus::Open => ("open", "#57f287"),
DoorStatus::Closed => ("closed", "#ed4245"),
DoorStatus::Offline => ("offline", "#99aab5"),
};
let label_width = 70u32;
let value_width = 10 + label.len() as u32 * 7;
let total_width = label_width + value_width;
let label_x = label_width as f32 / 2.0;
let value_x = label_width as f32 + value_width as f32 / 2.0;
let svg = format!(
"<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{total_width}\" height=\"20\">\
<linearGradient id=\"s\" x2=\"0\" y2=\"100%\">\
<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>\
<stop offset=\"1\" stop-opacity=\".1\"/>\
</linearGradient>\
<clipPath id=\"r\"><rect width=\"{total_width}\" height=\"20\" rx=\"3\" fill=\"#fff\"/></clipPath>\
<g clip-path=\"url(#r)\">\
<rect width=\"{label_width}\" height=\"20\" fill=\"#555\"/>\
<rect x=\"{label_width}\" width=\"{value_width}\" height=\"20\" fill=\"{color}\"/>\
<rect width=\"{total_width}\" height=\"20\" fill=\"url(#s)\"/>\
</g>\
<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"Verdana,Geneva,sans-serif\" font-size=\"11\">\
<text x=\"{label_x}\" y=\"15\" fill=\"#010101\" fill-opacity=\".3\">noisebell</text>\
<text x=\"{label_x}\" y=\"14\">noisebell</text>\
<text x=\"{value_x}\" y=\"15\" fill=\"#010101\" fill-opacity=\".3\">{label}</text>\
<text x=\"{value_x}\" y=\"14\">{label}</text>\
</g></svg>"
);
(
[
(header::CONTENT_TYPE, "image/svg+xml"),
(header::CACHE_CONTROL, "no-cache, max-age=0"),
(header::CONTENT_TYPE, "image/png"),
(header::CACHE_CONTROL, "public, max-age=5"),
],
svg,
image,
)
.into_response()
}

View file

@ -1,7 +1,72 @@
use anyhow::{Context, Result};
use rusqlite::Connection;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
use crate::types::{DoorStatus, StatusResponse};
use anyhow::{Context, Result};
use noisebell_common::{CacheStatusResponse, DoorStatus};
use rusqlite::{Connection, OptionalExtension};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LiveDoorStatus {
Open,
Closed,
}
impl LiveDoorStatus {
const fn into_door_status(self) -> DoorStatus {
match self {
Self::Open => DoorStatus::Open,
Self::Closed => DoorStatus::Closed,
}
}
}
impl TryFrom<DoorStatus> for LiveDoorStatus {
type Error = &'static str;
fn try_from(value: DoorStatus) -> std::result::Result<Self, Self::Error> {
match value {
DoorStatus::Open => Ok(Self::Open),
DoorStatus::Closed => Ok(Self::Closed),
DoorStatus::Offline => Err("offline is not a live door state"),
}
}
}
/// Everything the cache currently believes about the door.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CachedState {
    Unknown,
    Live { status: LiveDoorStatus, since: u64 },
    Offline { since: u64 },
}

impl CachedState {
    /// Status to expose over the API: anything that is not a live reading
    /// collapses to `Offline`.
    const fn status_for_api(self) -> DoorStatus {
        match self {
            Self::Live { status, .. } => status.into_door_status(),
            Self::Unknown | Self::Offline { .. } => DoorStatus::Offline,
        }
    }

    /// When the current state began; `Unknown` has no start time.
    const fn since_for_api(self) -> Option<u64> {
        match self {
            Self::Live { since, .. } | Self::Offline { since } => Some(since),
            Self::Unknown => None,
        }
    }
}
/// Decoded contents of the singleton `current_state` row.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct CurrentStateRow {
    // Typed interpretation of the stored status/timestamp pair.
    state: CachedState,
    // NOTE(review): presumably the last time the Pi was successfully reached — confirm against the writer.
    last_seen: Option<u64>,
    // When the cache most recently attempted a poll.
    last_checked: Option<u64>,
}
/// Parse a stored status string, tagging a failure with where the bad value
/// came from (table/column) so the error is actionable.
fn parse_status(status: &str, location: &str) -> Result<DoorStatus> {
    let parsed: std::result::Result<DoorStatus, _> = status.parse();
    parsed.with_context(|| format!("invalid door status {status:?} in {location}"))
}
pub fn init(path: &str) -> Result<Connection> {
let conn = Connection::open(path).context("failed to open SQLite database")?;
@ -10,69 +75,173 @@ pub fn init(path: &str) -> Result<Connection> {
"
CREATE TABLE IF NOT EXISTS current_state (
id INTEGER PRIMARY KEY CHECK (id = 1),
status TEXT NOT NULL DEFAULT 'offline',
status TEXT NOT NULL DEFAULT 'offline' CHECK (status IN ('open', 'closed', 'offline')),
timestamp INTEGER,
last_seen INTEGER,
last_checked INTEGER
);
CREATE TABLE IF NOT EXISTS state_log (
id INTEGER PRIMARY KEY AUTOINCREMENT,
status TEXT NOT NULL,
timestamp INTEGER NOT NULL,
recorded_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS pi_info (
id INTEGER PRIMARY KEY CHECK (id = 1),
data TEXT NOT NULL,
fetched_at INTEGER NOT NULL
);
INSERT OR IGNORE INTO current_state (id) VALUES (1);
INSERT OR IGNORE INTO pi_info (id, data, fetched_at) VALUES (1, '{}', 0);
",
)
.context("failed to initialize database schema")?;
migrate_current_state(&conn)?;
Ok(conn)
}
pub fn get_status(conn: &Connection) -> Result<StatusResponse> {
let (status_str, timestamp, last_checked) = conn.query_row(
"SELECT status, timestamp, last_checked FROM current_state WHERE id = 1",
/// Check whether the `current_state` table already has the named column,
/// using `PRAGMA table_info` (column index 1 is the column name).
fn current_state_has_column(conn: &Connection, column: &str) -> Result<bool> {
    let mut stmt = conn.prepare("PRAGMA table_info(current_state)")?;
    let mut rows = stmt.query([])?;
    loop {
        let Some(row) = rows.next()? else {
            return Ok(false);
        };
        let name: String = row.get(1)?;
        if name == column {
            return Ok(true);
        }
    }
}
/// Bring a pre-existing database up to the current `current_state` shape:
/// add the `last_checked` column if it is missing, replace NULL statuses
/// with 'offline', then sanity-check all stored status values.
fn migrate_current_state(conn: &Connection) -> Result<()> {
    let missing_last_checked = !current_state_has_column(conn, "last_checked")?;
    if missing_last_checked {
        conn.execute(
            "ALTER TABLE current_state ADD COLUMN last_checked INTEGER",
            [],
        )
        .context("failed to add current_state.last_checked")?;
    }
    conn.execute(
        "UPDATE current_state SET status = 'offline' WHERE status IS NULL",
        [],
    )
    .context("failed to backfill NULL current_state.status")?;
    validate_status_values(conn)
}
/// Outcome of applying an inbound state report to the database (see `apply_state`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ApplyStateOutcome {
    /// The state was recorded; the webhook handler fans it out downstream.
    Applied,
    /// Report matched what was already stored; the handler logs and ignores it.
    Duplicate,
    /// Report was judged stale (presumably an out-of-date timestamp — see
    /// `apply_state` for the exact rule); the handler logs and ignores it.
    Stale,
}
/// Fail if `table`.status contains NULL or any value outside
/// ('open', 'closed', 'offline').
///
/// `table` is interpolated into the SQL text, so callers must only ever pass
/// fixed internal table names — never external input.
fn validate_status_column(conn: &Connection, table: &str) -> Result<()> {
    let query = format!(
        "SELECT status FROM {table} WHERE status IS NULL OR status NOT IN ('open', 'closed', 'offline') LIMIT 1"
    );
    // Outer Option: was any offending row found; inner Option: NULL vs bad text.
    // with_context (not context) so the message is only built on failure.
    let invalid: Option<Option<String>> = conn
        .query_row(&query, [], |row| row.get(0))
        .optional()
        .with_context(|| format!("failed to validate {table}.status"))?;
    if let Some(status) = invalid {
        match status {
            Some(status) => anyhow::bail!("invalid door status {status:?} in {table}.status"),
            None => anyhow::bail!("invalid NULL door status in {table}.status"),
        }
    }
    Ok(())
}
/// Validates every table that stores a door status.
/// Currently only `current_state` carries one.
fn validate_status_values(conn: &Connection) -> Result<()> {
    validate_status_column(conn, "current_state")
}
fn current_state_row(conn: &Connection) -> Result<CurrentStateRow> {
let (status_str, since, last_seen, last_checked) = conn.query_row(
"SELECT status, timestamp, last_seen, last_checked FROM current_state WHERE id = 1",
[],
|row| {
Ok((
row.get::<_, Option<String>>(0)?,
row.get::<_, String>(0)?,
row.get::<_, Option<u64>>(1)?,
row.get::<_, Option<u64>>(2)?,
row.get::<_, Option<u64>>(3)?,
))
},
)?;
let status = status_str
.as_deref()
.and_then(DoorStatus::from_str)
.unwrap_or(DoorStatus::Offline);
Ok(StatusResponse {
status,
since: timestamp,
let status = parse_status(&status_str, "current_state.status")?;
let state = match (status, since) {
(DoorStatus::Open, Some(since)) => CachedState::Live {
status: LiveDoorStatus::Open,
since,
},
(DoorStatus::Closed, Some(since)) => CachedState::Live {
status: LiveDoorStatus::Closed,
since,
},
(DoorStatus::Offline, Some(since)) => CachedState::Offline { since },
(DoorStatus::Offline, None) => CachedState::Unknown,
(DoorStatus::Open | DoorStatus::Closed, None) => {
anyhow::bail!("live current_state.status must have a timestamp")
}
};
Ok(CurrentStateRow {
state,
last_seen,
last_checked,
})
}
pub fn update_state(conn: &Connection, status: DoorStatus, timestamp: u64, now: u64) -> Result<()> {
pub fn get_status(conn: &Connection) -> Result<CacheStatusResponse> {
let row = current_state_row(conn)?;
Ok(CacheStatusResponse {
status: row.state.status_for_api(),
since: row.state.since_for_api(),
last_checked: row.last_checked,
})
}
/// Persists an accepted state change: updates the single `current_state`
/// row and appends an entry to `state_log`.
///
/// `timestamp` is when the event happened on the Pi; `now` is when the
/// cache observed it (stored as `last_seen` / `recorded_at`).
fn write_state_change(
    conn: &Connection,
    status: DoorStatus,
    timestamp: u64,
    now: u64,
) -> Result<()> {
    let status_str = status.as_str();
    // BUGFIX: the timestamp column must carry the event's own timestamp,
    // not `now`. Binding `now` here broke the stale/duplicate comparisons
    // in apply_state (which compare against this stored value) and made
    // `since` report observation time instead of the actual change time.
    conn.execute(
        "UPDATE current_state SET status = ?1, timestamp = ?2, last_seen = ?3 WHERE id = 1",
        rusqlite::params![status_str, timestamp, now],
    )?;
    conn.execute(
        "INSERT INTO state_log (status, timestamp, recorded_at) VALUES (?1, ?2, ?3)",
        rusqlite::params![status_str, timestamp, now],
    )?;
    Ok(())
}
/// Applies a door-state event reported by the Pi and says what happened.
///
/// Ordering is by the event's own timestamp, compared against the stored
/// state's `since`: strictly older events are `Stale`, an event with the
/// identical timestamp and status is a `Duplicate`, everything else is
/// `Applied`. Duplicates and stale events are not logged but still refresh
/// `last_seen`, so liveness tracking keeps working.
pub fn apply_state(
    conn: &Connection,
    status: DoorStatus,
    timestamp: u64,
    now: u64,
) -> Result<ApplyStateOutcome> {
    let current = current_state_row(conn)?;
    // Convert to a live (open/closed) status; presumably rejects Offline —
    // offline transitions go through mark_offline instead. TODO confirm
    // against LiveDoorStatus::try_from.
    let live_status = LiveDoorStatus::try_from(status).map_err(anyhow::Error::msg)?;
    let outcome = match current.state {
        // Nothing stored yet: accept anything.
        CachedState::Unknown => ApplyStateOutcome::Applied,
        CachedState::Offline { since } if timestamp < since => ApplyStateOutcome::Stale,
        CachedState::Offline { .. } => ApplyStateOutcome::Applied,
        // Guard order matters: the strictly-older (stale) arm must run
        // before the equal-timestamp duplicate check below.
        CachedState::Live { status: _, since } if timestamp < since => ApplyStateOutcome::Stale,
        CachedState::Live {
            status: current_status,
            since,
        } if timestamp == since && live_status == current_status => ApplyStateOutcome::Duplicate,
        CachedState::Live { .. } => ApplyStateOutcome::Applied,
    };
    match outcome {
        ApplyStateOutcome::Applied => write_state_change(conn, status, timestamp, now)?,
        ApplyStateOutcome::Duplicate | ApplyStateOutcome::Stale => update_last_seen(conn, now)?,
    }
    Ok(outcome)
}
pub fn update_last_seen(conn: &Connection, now: u64) -> Result<()> {
conn.execute(
"UPDATE current_state SET last_seen = ?1 WHERE id = 1",
@ -90,62 +259,16 @@ pub fn update_last_checked(conn: &Connection, now: u64) -> Result<()> {
}
pub fn mark_offline(conn: &Connection, now: u64) -> Result<()> {
let offline = DoorStatus::Offline.as_str();
conn.execute(
"UPDATE current_state SET status = 'offline', timestamp = ?1 WHERE id = 1",
rusqlite::params![now],
)?;
conn.execute(
"INSERT INTO state_log (status, timestamp, recorded_at) VALUES ('offline', ?1, ?1)",
rusqlite::params![now],
"UPDATE current_state SET status = ?1, timestamp = ?2 WHERE id = 1",
rusqlite::params![offline, now],
)?;
Ok(())
}
pub fn get_current_status(conn: &Connection) -> Result<DoorStatus> {
let status_str: Option<String> = conn.query_row(
"SELECT status FROM current_state WHERE id = 1",
[],
|row| row.get(0),
)?;
Ok(status_str
.as_deref()
.and_then(DoorStatus::from_str)
.unwrap_or(DoorStatus::Offline))
}
/// Returns up to `limit` state-log entries, newest first.
pub fn get_history(conn: &Connection, limit: u32) -> Result<Vec<noisebell_common::HistoryEntry>> {
    let mut stmt = conn.prepare(
        "SELECT id, status, timestamp, recorded_at FROM state_log ORDER BY id DESC LIMIT ?1",
    )?;
    let rows = stmt.query_map(rusqlite::params![limit], |row| {
        Ok(noisebell_common::HistoryEntry {
            id: row.get(0)?,
            status: row.get(1)?,
            timestamp: row.get(2)?,
            recorded_at: row.get(3)?,
        })
    })?;
    let mut entries = Vec::new();
    for entry in rows {
        entries.push(entry?);
    }
    Ok(entries)
}
/// Returns the cached Pi info blob as JSON.
pub fn get_pi_info(conn: &Connection) -> Result<serde_json::Value> {
    let raw: String =
        conn.query_row("SELECT data FROM pi_info WHERE id = 1", [], |row| row.get(0))?;
    // Fall back to an empty object if the stored blob is not valid JSON.
    Ok(serde_json::from_str(&raw).unwrap_or(serde_json::json!({})))
}
pub fn update_pi_info(conn: &Connection, data: &serde_json::Value, now: u64) -> Result<()> {
let json = serde_json::to_string(data)?;
conn.execute(
"INSERT OR REPLACE INTO pi_info (id, data, fetched_at) VALUES (1, ?1, ?2)",
rusqlite::params![json, now],
)?;
Ok(())
Ok(current_state_row(conn)?.state.status_for_api())
}
#[cfg(test)]
@ -156,6 +279,31 @@ mod tests {
init(":memory:").expect("failed to init test db")
}
/// Builds a unique temp-file path for an on-disk test database.
fn temp_db_path(label: &str) -> std::path::PathBuf {
    // Nanosecond-resolution timestamp keeps parallel test runs from colliding.
    let stamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    std::env::temp_dir().join(format!("noisebell-{label}-{stamp}.sqlite"))
}
/// Creates an on-disk database with the pre-`last_checked` schema (and a
/// NULL status row) so migration tests can exercise `init` against it.
fn create_legacy_db(path: &Path) {
    let conn = Connection::open(path).unwrap();
    conn.execute_batch(
        "
        CREATE TABLE current_state (
            id INTEGER PRIMARY KEY CHECK (id = 1),
            status TEXT,
            timestamp INTEGER,
            last_seen INTEGER
        );
        INSERT INTO current_state (id, status, timestamp, last_seen) VALUES (1, NULL, NULL, NULL);
        ",
    )
    .unwrap();
}
#[test]
fn initial_status_is_offline() {
let conn = test_db();
@ -166,19 +314,20 @@ mod tests {
}
#[test]
fn update_state_changes_status() {
fn apply_state_changes_status_and_preserves_event_timestamp() {
let conn = test_db();
update_state(&conn, DoorStatus::Open, 1000, 1001).unwrap();
let outcome = apply_state(&conn, DoorStatus::Open, 1000, 1001).unwrap();
assert_eq!(outcome, ApplyStateOutcome::Applied);
let status = get_status(&conn).unwrap();
assert_eq!(status.status, DoorStatus::Open);
assert_eq!(status.since, Some(1001));
assert_eq!(status.since, Some(1000));
}
#[test]
fn mark_offline_sets_offline_status() {
let conn = test_db();
update_state(&conn, DoorStatus::Open, 1000, 1001).unwrap();
apply_state(&conn, DoorStatus::Open, 1000, 1001).unwrap();
mark_offline(&conn, 2000).unwrap();
let status = get_status(&conn).unwrap();
@ -191,7 +340,7 @@ mod tests {
let conn = test_db();
assert_eq!(get_current_status(&conn).unwrap(), DoorStatus::Offline);
update_state(&conn, DoorStatus::Closed, 1000, 1001).unwrap();
apply_state(&conn, DoorStatus::Closed, 1000, 1001).unwrap();
assert_eq!(get_current_status(&conn).unwrap(), DoorStatus::Closed);
}
@ -204,23 +353,9 @@ mod tests {
assert_eq!(status.last_checked, Some(5000));
}
#[test]
fn history_records_state_changes() {
let conn = test_db();
update_state(&conn, DoorStatus::Open, 1000, 1001).unwrap();
update_state(&conn, DoorStatus::Closed, 2000, 2001).unwrap();
mark_offline(&conn, 3000).unwrap();
let history = get_history(&conn, 10).unwrap();
assert_eq!(history.len(), 3);
assert_eq!(history[0].status, "offline");
assert_eq!(history[1].status, "closed");
assert_eq!(history[2].status, "open");
}
#[test]
fn status_response_serializes_correctly() {
let resp = StatusResponse {
let resp = CacheStatusResponse {
status: DoorStatus::Open,
since: Some(1234),
last_checked: Some(5678),
@ -232,20 +367,110 @@ mod tests {
}
#[test]
fn null_status_migration_converts_to_offline() {
// Simulate an old database with NULL status
let conn = Connection::open_in_memory().unwrap();
conn.execute_batch("
CREATE TABLE current_state (id INTEGER PRIMARY KEY CHECK (id = 1), status TEXT, timestamp INTEGER, last_seen INTEGER);
INSERT INTO current_state (id, status, timestamp, last_seen) VALUES (1, NULL, NULL, NULL);
CREATE TABLE state_log (id INTEGER PRIMARY KEY AUTOINCREMENT, status TEXT NOT NULL, timestamp INTEGER NOT NULL, recorded_at INTEGER NOT NULL);
CREATE TABLE pi_info (id INTEGER PRIMARY KEY CHECK (id = 1), data TEXT NOT NULL, fetched_at INTEGER NOT NULL);
INSERT INTO pi_info (id, data, fetched_at) VALUES (1, '{}', 0);
").unwrap();
fn apply_state_deduplicates_same_event() {
let conn = test_db();
assert_eq!(
apply_state(&conn, DoorStatus::Open, 1000, 1001).unwrap(),
ApplyStateOutcome::Applied
);
assert_eq!(
apply_state(&conn, DoorStatus::Open, 1000, 1002).unwrap(),
ApplyStateOutcome::Duplicate
);
let status = get_status(&conn).unwrap();
assert_eq!(status.status, DoorStatus::Open);
assert_eq!(status.since, Some(1000));
}
#[test]
fn apply_state_ignores_stale_events() {
let conn = test_db();
assert_eq!(
apply_state(&conn, DoorStatus::Open, 2000, 2001).unwrap(),
ApplyStateOutcome::Applied
);
assert_eq!(
apply_state(&conn, DoorStatus::Closed, 1999, 2002).unwrap(),
ApplyStateOutcome::Stale
);
// Re-init should migrate
let conn = init(":memory:").unwrap();
let status = get_current_status(&conn).unwrap();
assert_eq!(status, DoorStatus::Offline);
assert_eq!(status, DoorStatus::Open);
}
#[test]
fn apply_state_accepts_newer_same_status_event() {
    let conn = test_db();
    assert_eq!(
        apply_state(&conn, DoorStatus::Open, 1000, 1001).unwrap(),
        ApplyStateOutcome::Applied
    );
    // Same status but a strictly newer timestamp is a real update, not a
    // duplicate: `since` must advance to the newer event time.
    assert_eq!(
        apply_state(&conn, DoorStatus::Open, 2000, 2001).unwrap(),
        ApplyStateOutcome::Applied
    );
    let status = get_status(&conn).unwrap();
    assert_eq!(status.status, DoorStatus::Open);
    assert_eq!(status.since, Some(2000));
}
#[test]
fn apply_state_after_offline_recovers_with_event_timestamp() {
    let conn = test_db();
    mark_offline(&conn, 3000).unwrap();
    // Events predating the offline transition are rejected as stale...
    assert_eq!(
        apply_state(&conn, DoorStatus::Open, 2500, 3100).unwrap(),
        ApplyStateOutcome::Stale
    );
    // ...while newer ones bring the cache back to a live state.
    assert_eq!(
        apply_state(&conn, DoorStatus::Open, 3200, 3201).unwrap(),
        ApplyStateOutcome::Applied
    );
    let status = get_status(&conn).unwrap();
    assert_eq!(status.status, DoorStatus::Open);
    assert_eq!(status.since, Some(3200));
}
#[test]
fn legacy_db_is_migrated_in_place() {
    let path = temp_db_path("legacy-migration");
    create_legacy_db(&path);
    // init() must upgrade the old schema in place rather than fail on it.
    let conn = init(path.to_str().unwrap()).unwrap();
    assert!(current_state_has_column(&conn, "last_checked").unwrap());
    let status = get_status(&conn).unwrap();
    assert_eq!(status.status, DoorStatus::Offline);
    assert_eq!(status.since, None);
    assert_eq!(status.last_checked, None);
    drop(conn);
    std::fs::remove_file(path).unwrap();
}
#[test]
fn invalid_legacy_status_is_rejected() {
    let path = temp_db_path("legacy-invalid-status");
    create_legacy_db(&path);
    // Corrupt the stored status so init's validation has something to reject.
    let conn = Connection::open(&path).unwrap();
    conn.execute(
        "UPDATE current_state SET status = 'mystery' WHERE id = 1",
        [],
    )
    .unwrap();
    drop(conn);
    let err = init(path.to_str().unwrap()).unwrap_err().to_string();
    assert!(err.contains("invalid door status"));
    std::fs::remove_file(path).unwrap();
}
}

View file

@ -4,8 +4,8 @@ use std::time::Duration;
use anyhow::{Context, Result};
use axum::routing::{get, post};
use axum::Router;
use tokio::sync::Mutex;
use std::sync::atomic::AtomicU64;
use tokio::sync::Mutex;
use tower_http::trace::TraceLayer;
use tracing::{info, Level};
@ -15,7 +15,7 @@ mod poller;
mod types;
mod webhook;
use types::WebhookTarget;
use types::{WebhookAuth, WebhookTarget};
#[tokio::main]
async fn main() -> Result<()> {
@ -39,19 +39,14 @@ async fn main() -> Result<()> {
let inbound_api_key = std::env::var("NOISEBELL_CACHE_INBOUND_API_KEY")
.context("NOISEBELL_CACHE_INBOUND_API_KEY is required")?;
let data_dir =
std::env::var("NOISEBELL_CACHE_DATA_DIR").unwrap_or_else(|_| "/var/lib/noisebell-cache".into());
let data_dir = std::env::var("NOISEBELL_CACHE_DATA_DIR")
.unwrap_or_else(|_| "/var/lib/noisebell-cache".into());
let status_poll_interval_secs: u64 = std::env::var("NOISEBELL_CACHE_STATUS_POLL_INTERVAL_SECS")
.unwrap_or_else(|_| "60".into())
.parse()
.context("NOISEBELL_CACHE_STATUS_POLL_INTERVAL_SECS must be a valid u64")?;
let info_poll_interval_secs: u64 = std::env::var("NOISEBELL_CACHE_INFO_POLL_INTERVAL_SECS")
.unwrap_or_else(|_| "300".into())
.parse()
.context("NOISEBELL_CACHE_INFO_POLL_INTERVAL_SECS must be a valid u64")?;
let offline_threshold: u32 = std::env::var("NOISEBELL_CACHE_OFFLINE_THRESHOLD")
.unwrap_or_else(|_| "3".into())
.parse()
@ -79,8 +74,11 @@ async fn main() -> Result<()> {
match std::env::var(&url_key) {
Ok(url) => {
let secret_key = format!("NOISEBELL_CACHE_WEBHOOK_{i}_SECRET");
let secret = std::env::var(&secret_key).ok();
webhooks.push(WebhookTarget { url, secret });
let auth = match std::env::var(&secret_key) {
Ok(secret) => WebhookAuth::Bearer(secret),
Err(_) => WebhookAuth::None,
};
webhooks.push(WebhookTarget { url, auth });
}
Err(_) => break,
}
@ -106,7 +104,6 @@ async fn main() -> Result<()> {
pi_address,
pi_api_key,
status_poll_interval: Duration::from_secs(status_poll_interval_secs),
info_poll_interval: Duration::from_secs(info_poll_interval_secs),
offline_threshold,
retry_attempts,
retry_base_delay_secs,
@ -114,7 +111,6 @@ async fn main() -> Result<()> {
});
poller::spawn_status_poller(poller_config.clone(), db.clone(), client.clone());
poller::spawn_info_poller(poller_config, db.clone(), client.clone());
let app_state = Arc::new(api::AppState {
db,
@ -131,13 +127,10 @@ async fn main() -> Result<()> {
.route("/health", get(api::health))
.route("/webhook", post(api::post_webhook))
.route("/status", get(api::get_status))
.route("/info", get(api::get_info))
.route("/history", get(api::get_history))
.route("/image", get(api::get_image))
.route("/image/open.png", get(api::get_image_open))
.route("/image/closed.png", get(api::get_image_closed))
.route("/image/offline.png", get(api::get_image_offline))
.route("/badge.svg", get(api::get_badge))
.layer(
TraceLayer::new_for_http()
.make_span_with(tower_http::trace::DefaultMakeSpan::new().level(Level::INFO))

View file

@ -1,19 +1,19 @@
use std::sync::Arc;
use std::time::Duration;
use noisebell_common::WebhookPayload;
use noisebell_common::{DoorStatus, PiStatusResponse, WebhookPayload};
use tokio::sync::Mutex;
use tracing::{error, info, warn};
use crate::db;
use crate::types::{DoorStatus, WebhookTarget};
use crate::db::ApplyStateOutcome;
use crate::types::WebhookTarget;
use crate::webhook;
pub struct PollerConfig {
pub pi_address: String,
pub pi_api_key: String,
pub status_poll_interval: Duration,
pub info_poll_interval: Duration,
pub offline_threshold: u32,
pub retry_attempts: u32,
pub retry_base_delay_secs: u64,
@ -64,9 +64,10 @@ pub fn spawn_status_poller(
}
let now = unix_now();
if let Ok(body) = resp.json::<serde_json::Value>().await {
let status_str = body.get("status").and_then(|s| s.as_str()).map(String::from);
let event_timestamp = body.get("timestamp").and_then(|t| t.as_u64());
match resp.json::<PiStatusResponse>().await {
Ok(body) => {
let status = body.status;
let event_timestamp = body.timestamp;
let db = db.clone();
let update_result = tokio::task::spawn_blocking(move || {
@ -75,43 +76,64 @@ pub fn spawn_status_poller(
error!(error = %e, "failed to update last_seen");
}
if let Some(ref status_str) = status_str {
if let Some(status) = DoorStatus::from_str(status_str) {
let current = db::get_current_status(&conn);
let changed = match &current {
Ok(current) => *current != status,
Err(_) => true,
};
if changed {
let timestamp = event_timestamp.unwrap_or(now);
if let Err(e) = db::update_state(&conn, status, timestamp, now) {
match db::apply_state(&conn, status, event_timestamp, now) {
Ok(ApplyStateOutcome::Applied) => {
Some((status, event_timestamp, ApplyStateOutcome::Applied))
}
Ok(ApplyStateOutcome::Duplicate) => Some((
status,
event_timestamp,
ApplyStateOutcome::Duplicate,
)),
Ok(ApplyStateOutcome::Stale) => {
Some((status, event_timestamp, ApplyStateOutcome::Stale))
}
Err(e) => {
error!(error = %e, "failed to update state from poll");
return None;
}
return Some((status, timestamp));
}
}
}
None
}
}
})
.await
.expect("db task panicked");
if let Some((status, timestamp)) = update_result {
info!(status = status.as_str(), "state updated from poll");
if let Some((status, timestamp, outcome)) = update_result {
match outcome {
ApplyStateOutcome::Applied => {
info!(
status = %status,
timestamp,
"state updated from poll"
);
webhook::forward(
&client,
&config.webhooks,
&WebhookPayload {
status: status.as_str().to_string(),
timestamp,
},
&WebhookPayload { status, timestamp },
config.retry_attempts,
config.retry_base_delay_secs,
)
.await;
}
ApplyStateOutcome::Duplicate => {
info!(
status = %status,
timestamp,
"duplicate poll state ignored"
);
}
ApplyStateOutcome::Stale => {
warn!(
status = %status,
timestamp,
"stale poll state ignored"
);
}
}
}
}
Err(e) => {
error!(error = %e, "failed to parse status poll response");
}
}
}
_ => {
@ -139,12 +161,15 @@ pub fn spawn_status_poller(
match marked {
Ok(()) => {
info!("Pi marked offline after {} consecutive failures", consecutive_failures);
info!(
"Pi marked offline after {} consecutive failures",
consecutive_failures
);
webhook::forward(
&client,
&config.webhooks,
&WebhookPayload {
status: "offline".to_string(),
status: DoorStatus::Offline,
timestamp: now,
},
config.retry_attempts,
@ -164,47 +189,3 @@ pub fn spawn_status_poller(
}
});
}
/// Spawns a background task that periodically fetches `/info` from the Pi
/// and caches the returned JSON blob via `db::update_pi_info`.
///
/// Runs forever; failures are logged and retried on the next tick.
pub fn spawn_info_poller(
    config: Arc<PollerConfig>,
    db: Arc<Mutex<rusqlite::Connection>>,
    client: reqwest::Client,
) {
    tokio::spawn(async move {
        loop {
            let result = client
                .get(format!("{}/info", config.pi_address))
                .bearer_auth(&config.pi_api_key)
                .send()
                .await;
            match result {
                Ok(resp) if resp.status().is_success() => {
                    // NOTE(review): a 2xx body that fails to parse as JSON is
                    // silently dropped here — confirm that is intentional.
                    if let Ok(data) = resp.json::<serde_json::Value>().await {
                        let now = unix_now();
                        let db = db.clone();
                        // rusqlite is blocking; keep it off the async runtime.
                        let result = tokio::task::spawn_blocking(move || {
                            let conn = db.blocking_lock();
                            db::update_pi_info(&conn, &data, now)
                        })
                        .await
                        .expect("db task panicked");
                        if let Err(e) = result {
                            error!(error = %e, "failed to update pi_info");
                        }
                    }
                }
                _ => {
                    // Non-2xx responses and transport errors share one log path.
                    let err_msg = match &result {
                        Ok(resp) => format!("HTTP {}", resp.status()),
                        Err(e) => e.to_string(),
                    };
                    warn!(error = %err_msg, "info poll failed");
                }
            }
            tokio::time::sleep(config.info_poll_interval).await;
        }
    });
}

View file

@ -1,72 +1,11 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum DoorStatus {
Open,
Closed,
Offline,
}
impl DoorStatus {
pub fn as_str(self) -> &'static str {
match self {
DoorStatus::Open => "open",
DoorStatus::Closed => "closed",
DoorStatus::Offline => "offline",
}
}
pub fn from_str(s: &str) -> Option<Self> {
match s {
"open" => Some(DoorStatus::Open),
"closed" => Some(DoorStatus::Closed),
"offline" => Some(DoorStatus::Offline),
_ => None,
}
}
}
#[derive(Debug, Clone, Serialize)]
pub struct StatusResponse {
pub status: DoorStatus,
pub since: Option<u64>, // when the current status was set
pub last_checked: Option<u64>, // when the cache last attempted to poll
/// How the cache authenticates when calling an outbound webhook target.
#[derive(Debug, Clone)]
pub enum WebhookAuth {
    /// Send the request with no Authorization header.
    None,
    /// Send `Authorization: Bearer <secret>`.
    Bearer(String),
}
#[derive(Debug, Clone)]
pub struct WebhookTarget {
pub url: String,
pub secret: Option<String>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn door_status_round_trip() {
for status in [DoorStatus::Open, DoorStatus::Closed, DoorStatus::Offline] {
let s = status.as_str();
assert_eq!(DoorStatus::from_str(s), Some(status));
}
}
#[test]
fn door_status_from_str_rejects_unknown() {
assert_eq!(DoorStatus::from_str("unknown"), None);
assert_eq!(DoorStatus::from_str(""), None);
}
#[test]
fn door_status_serde_lowercase() {
let json = serde_json::to_string(&DoorStatus::Open).unwrap();
assert_eq!(json, "\"open\"");
let deserialized: DoorStatus = serde_json::from_str("\"closed\"").unwrap();
assert_eq!(deserialized, DoorStatus::Closed);
let deserialized: DoorStatus = serde_json::from_str("\"offline\"").unwrap();
assert_eq!(deserialized, DoorStatus::Offline);
}
pub auth: WebhookAuth,
}

View file

@ -3,7 +3,7 @@ use std::time::Duration;
use noisebell_common::WebhookPayload;
use tracing::{error, info, warn};
use crate::types::WebhookTarget;
use crate::types::{WebhookAuth, WebhookTarget};
pub async fn forward(
client: &reqwest::Client,
@ -18,14 +18,14 @@ pub async fn forward(
let payload = payload.clone();
let client = client.clone();
let url = target.url.clone();
let secret = target.secret.clone();
let auth = target.auth.clone();
set.spawn(async move {
info!(url = %url, status = %payload.status, "forwarding to outbound webhook");
for attempt in 0..=retry_attempts {
let mut req = client.post(&url).json(&payload);
if let Some(ref secret) = secret {
if let WebhookAuth::Bearer(secret) = &auth {
req = req.bearer_auth(secret);
}

View file

@ -6,7 +6,7 @@ use axum::extract::State as AxumState;
use axum::http::{HeaderMap, StatusCode};
use axum::routing::{get, post};
use axum::{Json, Router};
use noisebell_common::{validate_bearer, WebhookPayload};
use noisebell_common::{validate_bearer, CacheStatusResponse, DoorStatus, WebhookPayload};
use serenity::all::{
ChannelId, Colour, CommandInteraction, CreateCommand, CreateEmbed, CreateInteractionResponse,
CreateInteractionResponseMessage, CreateMessage, GatewayIntents, Interaction,
@ -24,11 +24,26 @@ struct AppState {
client: reqwest::Client,
}
fn build_embed(status: &str, timestamp: u64, image_base_url: &str) -> CreateEmbed {
fn build_embed(status: DoorStatus, timestamp: u64, image_base_url: &str) -> CreateEmbed {
let (colour, title, description, image_file) = match status {
"open" => (Colour::from_rgb(0, 255, 0), "Noisebridge is Open!", "It's time to start hacking.", "open.png"),
"closed" => (Colour::from_rgb(255, 0, 0), "Noisebridge is Closed!", "We'll see you again soon.", "closed.png"),
_ => (Colour::from_rgb(153, 170, 181), "Noisebridge is Offline", "The Noisebridge Pi is not responding.", "offline.png"),
DoorStatus::Open => (
Colour::from_rgb(0, 255, 0),
"Noisebridge is Open!",
"It's time to start hacking.",
"open.png",
),
DoorStatus::Closed => (
Colour::from_rgb(255, 0, 0),
"Noisebridge is Closed!",
"We'll see you again soon.",
"closed.png",
),
DoorStatus::Offline => (
Colour::from_rgb(153, 170, 181),
"Noisebridge is Offline",
"The Noisebridge Pi is not responding.",
"offline.png",
),
};
let image_url = format!("{image_base_url}/{image_file}");
@ -38,7 +53,10 @@ fn build_embed(status: &str, timestamp: u64, image_base_url: &str) -> CreateEmbe
.description(description)
.colour(colour)
.thumbnail(image_url)
.timestamp(serenity::model::Timestamp::from_unix_timestamp(timestamp as i64).unwrap_or_else(|_| serenity::model::Timestamp::now()))
.timestamp(
serenity::model::Timestamp::from_unix_timestamp(timestamp as i64)
.unwrap_or_else(|_| serenity::model::Timestamp::now()),
)
}
async fn post_webhook(
@ -52,7 +70,7 @@ async fn post_webhook(
info!(status = %body.status, timestamp = body.timestamp, "received webhook");
let embed = build_embed(&body.status, body.timestamp, &state.image_base_url);
let embed = build_embed(body.status, body.timestamp, &state.image_base_url);
let message = CreateMessage::new().embed(embed);
match state.channel_id.send_message(&state.http, message).await {
@ -78,25 +96,27 @@ fn format_timestamp(ts: u64) -> String {
format!("<t:{}:R>", ts)
}
async fn handle_status(state: &AppState, _command: &CommandInteraction) -> CreateInteractionResponse {
async fn handle_status(
state: &AppState,
_command: &CommandInteraction,
) -> CreateInteractionResponse {
let url = format!("{}/status", state.cache_url);
let resp = state.client.get(&url).send().await;
let embed = match resp {
Ok(resp) if resp.status().is_success() => {
match resp.json::<serde_json::Value>().await {
Ok(resp) if resp.status().is_success() => match resp.json::<CacheStatusResponse>().await {
Ok(data) => {
let status = data.get("status").and_then(|s| s.as_str()).unwrap_or("unknown");
let since = data.get("since").and_then(|t| t.as_u64());
let last_checked = data.get("last_checked").and_then(|t| t.as_u64());
let mut embed = build_embed(status, since.unwrap_or(unix_now()), &state.image_base_url);
let mut embed = build_embed(
data.status,
data.since.unwrap_or(unix_now()),
&state.image_base_url,
);
let mut fields = Vec::new();
if let Some(ts) = since {
if let Some(ts) = data.since {
fields.push(("Since", format_timestamp(ts), true));
}
if let Some(ts) = last_checked {
if let Some(ts) = data.last_checked {
fields.push(("Last Checked", format_timestamp(ts), true));
}
if !fields.is_empty() {
@ -111,140 +131,14 @@ async fn handle_status(state: &AppState, _command: &CommandInteraction) -> Creat
.description("Failed to parse status response.")
.colour(Colour::from_rgb(255, 0, 0))
}
}
}
_ => {
CreateEmbed::new()
},
_ => CreateEmbed::new()
.title("Error")
.description("Failed to reach the cache service.")
.colour(Colour::from_rgb(255, 0, 0))
}
.colour(Colour::from_rgb(255, 0, 0)),
};
CreateInteractionResponse::Message(
CreateInteractionResponseMessage::new().embed(embed)
)
}
async fn handle_info(state: &AppState, _command: &CommandInteraction) -> CreateInteractionResponse {
let url = format!("{}/info", state.cache_url);
let resp = state.client.get(&url).send().await;
let embed = match resp {
Ok(resp) if resp.status().is_success() => {
match resp.json::<serde_json::Value>().await {
Ok(data) => {
let mut fields = Vec::new();
if let Some(temp) = data.get("cpu_temp_celsius").and_then(|t| t.as_f64()) {
fields.push(("CPU Temp", format!("{:.1}°C", temp), true));
}
if let Some(load) = data.get("load_average").and_then(|l| l.as_array()) {
let loads: Vec<String> = load.iter().filter_map(|v| v.as_f64()).map(|v| format!("{:.2}", v)).collect();
fields.push(("Load Average", loads.join(", "), true));
}
if let Some(total) = data.get("memory_total_kb").and_then(|t| t.as_u64()) {
if let Some(avail) = data.get("memory_available_kb").and_then(|a| a.as_u64()) {
let used = total.saturating_sub(avail);
fields.push(("Memory", format!("{} / {} MB", used / 1024, total / 1024), true));
}
}
if let Some(total) = data.get("disk_total_bytes").and_then(|t| t.as_u64()) {
if let Some(avail) = data.get("disk_available_bytes").and_then(|a| a.as_u64()) {
let used = total.saturating_sub(avail);
fields.push(("Disk", format!("{:.1} / {:.1} GB", used as f64 / 1e9, total as f64 / 1e9), true));
}
}
if let Some(uptime) = data.get("uptime_secs").and_then(|u| u.as_u64()) {
let hours = uptime / 3600;
let mins = (uptime % 3600) / 60;
fields.push(("Uptime", format!("{}h {}m", hours, mins), true));
}
if let Some(version) = data.get("nixos_version").and_then(|v| v.as_str()) {
fields.push(("NixOS", version.to_string(), true));
}
if let Some(commit) = data.get("commit").and_then(|c| c.as_str()) {
fields.push(("Commit", commit.to_string(), true));
}
CreateEmbed::new()
.title("Noisebridge Pi Info")
.colour(Colour::BLUE)
.fields(fields)
}
Err(e) => {
error!(error = %e, "failed to parse info response");
CreateEmbed::new()
.title("Error")
.description("Failed to parse Pi info.")
.colour(Colour::from_rgb(255, 0, 0))
}
}
}
_ => {
CreateEmbed::new()
.title("Error")
.description("Failed to reach the cache service.")
.colour(Colour::from_rgb(255, 0, 0))
}
};
CreateInteractionResponse::Message(
CreateInteractionResponseMessage::new().embed(embed)
)
}
async fn handle_history(state: &AppState, _command: &CommandInteraction) -> CreateInteractionResponse {
let url = format!("{}/history", state.cache_url);
let resp = state.client.get(&url).send().await;
let embed = match resp {
Ok(resp) if resp.status().is_success() => {
match resp.json::<Vec<serde_json::Value>>().await {
Ok(entries) => {
let lines: Vec<String> = entries.iter().take(10).map(|entry| {
let status = entry.get("status").and_then(|s| s.as_str()).unwrap_or("unknown");
let ts = entry.get("timestamp").and_then(|t| t.as_u64()).unwrap_or(0);
let emoji = match status {
"open" => "🟢",
"closed" => "🔴",
"offline" => "",
_ => "",
};
format!("{} **{}** — {}", emoji, status, format_timestamp(ts))
}).collect();
let description = if lines.is_empty() {
"No history available.".to_string()
} else {
lines.join("\n")
};
CreateEmbed::new()
.title("Recent Door History")
.description(description)
.colour(Colour::BLUE)
}
Err(e) => {
error!(error = %e, "failed to parse history response");
CreateEmbed::new()
.title("Error")
.description("Failed to parse history.")
.colour(Colour::from_rgb(255, 0, 0))
}
}
}
_ => {
CreateEmbed::new()
.title("Error")
.description("Failed to reach the cache service.")
.colour(Colour::from_rgb(255, 0, 0))
}
};
CreateInteractionResponse::Message(
CreateInteractionResponseMessage::new().embed(embed)
)
CreateInteractionResponse::Message(CreateInteractionResponseMessage::new().embed(embed))
}
struct Handler {
@ -256,11 +150,7 @@ impl serenity::all::EventHandler for Handler {
async fn ready(&self, ctx: serenity::all::Context, ready: serenity::model::gateway::Ready) {
info!(user = %ready.user.name, "Discord bot connected");
let commands = vec![
CreateCommand::new("status").description("Show the current door status"),
CreateCommand::new("info").description("Show Pi system information"),
CreateCommand::new("history").description("Show recent door history"),
];
let commands = vec![CreateCommand::new("status").description("Show the current door status")];
if let Err(e) = serenity::all::Command::set_global_commands(&ctx.http, commands).await {
error!(error = %e, "failed to register slash commands");
@ -273,13 +163,9 @@ impl serenity::all::EventHandler for Handler {
if let Interaction::Command(command) = interaction {
let response = match command.data.name.as_str() {
"status" => handle_status(&self.state, &command).await,
"info" => handle_info(&self.state, &command).await,
"history" => handle_history(&self.state, &command).await,
_ => {
CreateInteractionResponse::Message(
CreateInteractionResponseMessage::new().content("Unknown command.")
)
}
_ => CreateInteractionResponse::Message(
CreateInteractionResponseMessage::new().content("Unknown command."),
),
};
if let Err(e) = command.create_response(&ctx.http, response).await {
@ -295,8 +181,8 @@ async fn main() -> Result<()> {
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.init();
let discord_token = std::env::var("NOISEBELL_DISCORD_TOKEN")
.context("NOISEBELL_DISCORD_TOKEN is required")?;
let discord_token =
std::env::var("NOISEBELL_DISCORD_TOKEN").context("NOISEBELL_DISCORD_TOKEN is required")?;
let channel_id: u64 = std::env::var("NOISEBELL_DISCORD_CHANNEL_ID")
.context("NOISEBELL_DISCORD_CHANNEL_ID is required")?

View file

@ -1,3 +1,6 @@
use std::fmt;
use std::str::FromStr;
use axum::http::HeaderMap;
use serde::{Deserialize, Serialize};
@ -9,18 +12,103 @@ pub fn validate_bearer(headers: &HeaderMap, expected: &str) -> bool {
.unwrap_or(false)
}
/// Door state as exposed over the API and stored in the cache database.
///
/// Serializes to/from the lowercase strings "open", "closed", "offline".
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum DoorStatus {
    Open,
    Closed,
    /// The Pi has stopped responding; the real door state is unknown.
    Offline,
}
impl DoorStatus {
    /// Every known door status, in declaration order.
    pub const ALL: [Self; 3] = [Self::Open, Self::Closed, Self::Offline];

    /// Lowercase wire/database representation of the status.
    pub const fn as_str(self) -> &'static str {
        match self {
            Self::Open => "open",
            Self::Closed => "closed",
            Self::Offline => "offline",
        }
    }

    /// Maps a raw open/closed reading onto a status.
    pub const fn from_is_open(is_open: bool) -> Self {
        match is_open {
            true => Self::Open,
            false => Self::Closed,
        }
    }
}
impl fmt::Display for DoorStatus {
    /// Formats as the lowercase wire string ("open" / "closed" / "offline").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// Error returned when parsing an unrecognized door-status string.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ParseDoorStatusError;

impl fmt::Display for ParseDoorStatusError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("invalid door status")
    }
}

impl std::error::Error for ParseDoorStatusError {}
impl FromStr for DoorStatus {
    type Err = ParseDoorStatusError;

    /// Parses the lowercase wire strings; anything else is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let status = match s {
            "open" => Self::Open,
            "closed" => Self::Closed,
            "offline" => Self::Offline,
            _ => return Err(ParseDoorStatusError),
        };
        Ok(status)
    }
}
/// Body posted to outbound webhooks whenever the door state changes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebhookPayload {
    /// New door status.
    pub status: DoorStatus,
    /// Event timestamp; produced from unix_now() upstream (presumably unix
    /// seconds — confirm at call sites).
    pub timestamp: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HistoryEntry {
pub id: i64,
pub status: String,
pub struct CacheStatusResponse {
pub status: DoorStatus,
pub since: Option<u64>,
pub last_checked: Option<u64>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PiStatusResponse {
pub status: DoorStatus,
pub timestamp: u64,
pub recorded_at: u64,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SignalLevel {
Low,
High,
}
impl SignalLevel {
pub const fn as_str(self) -> &'static str {
match self {
Self::Low => "low",
Self::High => "high",
}
}
}
impl fmt::Display for SignalLevel {
    /// Formats the level as its lowercase wire string.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
#[cfg(test)]
@ -54,12 +142,46 @@ mod tests {
assert!(!validate_bearer(&headers, "secret123"));
}
#[test]
fn door_status_round_trips() {
    // Every variant must survive str -> enum parsing and serialize to the
    // same lowercase string that `Display` produces.
    for status in DoorStatus::ALL {
        let text = status.as_str();
        assert_eq!(text.parse::<DoorStatus>().unwrap(), status);
        let json = serde_json::to_string(&status).unwrap();
        assert_eq!(json, format!("\"{status}\""));
    }
}
#[test]
fn door_status_rejects_unknown_values() {
    // Neither FromStr nor serde should accept a string outside DoorStatus::ALL.
    let parsed: Result<DoorStatus, _> = "unknown".parse();
    assert!(parsed.is_err());
    let deserialized = serde_json::from_str::<DoorStatus>("\"unknown\"");
    assert!(deserialized.is_err());
}
#[test]
fn webhook_payload_round_trips() {
    // Fix: the block contained both the old String-based construction/assert
    // and the new DoorStatus-based ones (a diff interleave) — duplicate
    // statements that don't compile. Only the typed version is kept.
    let payload = WebhookPayload {
        status: DoorStatus::Open,
        timestamp: 1234567890,
    };
    // Serialize, then deserialize, and check both fields survive.
    let json = serde_json::to_string(&payload).unwrap();
    let deserialized: WebhookPayload = serde_json::from_str(&json).unwrap();
    assert_eq!(deserialized.status, DoorStatus::Open);
    assert_eq!(deserialized.timestamp, 1234567890);
}
#[test]
fn cache_status_response_serializes_with_enum_status() {
    // Serialize an inline response and verify the JSON field-by-field: the
    // enum must come out as its lowercase string, the options as numbers.
    let value = serde_json::to_value(&CacheStatusResponse {
        status: DoorStatus::Closed,
        since: Some(123),
        last_checked: Some(456),
    })
    .unwrap();
    assert_eq!(value["status"], "closed");
    assert_eq!(value["since"], 123);
    assert_eq!(value["last_checked"], 456);
}
}

View file

@ -1,16 +0,0 @@
# Manifest for the noisebell RSS/Atom feed service binary.
[package]
name = "noisebell-rss"
version = "0.1.0"
edition = "2021"

[dependencies]
anyhow = "1.0"
axum = "0.8"
# Shared types (e.g. HistoryEntry) used to decode the cache's /history response.
noisebell-common = { path = "../noisebell-common" }
# HTTP client for polling the cache service; rustls avoids a native TLS dependency.
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
serde = { version = "1.0", features = ["derive"] }
# RFC 3339 timestamp formatting for Atom <updated> elements.
time = { version = "0.3", features = ["formatting"] }
# "signal" is needed for the SIGTERM graceful-shutdown handler.
tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "sync", "signal"] }
tower-http = { version = "0.6", features = ["trace"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }

View file

@ -1,20 +0,0 @@
# RSS Service
Serves an Atom feed of door status history. Stateless — it fetches from the cache service's `/history` endpoint on each request and renders the last 7 days as Atom XML.
## API
| Method | Path | Description |
|--------|------|-------------|
| `GET` | `/feed` | Atom feed |
| `GET` | `/health` | Health check |
## Configuration
NixOS options under `services.noisebell-rss`:
| Option | Default | Description |
|--------|---------|-------------|
| `domain` | required | Caddy virtual host domain |
| `cacheUrl` | required | Cache service URL (e.g. `http://localhost:3000`) |
| `port` | `3002` | Listen port |

View file

@ -1,70 +0,0 @@
# NixOS module for the noisebell RSS/Atom feed service.
# Takes the built package as its first argument so the flake can inject the
# compiled binary before the module is imported.
pkg:
{ config, lib, ... }:
let
  cfg = config.services.noisebell-rss;
  bin = "${pkg}/bin/noisebell-rss";
in
{
  options.services.noisebell-rss = {
    enable = lib.mkEnableOption "noisebell RSS/Atom feed";
    # Public domain; Caddy terminates TLS and reverse-proxies to the service.
    domain = lib.mkOption {
      type = lib.types.str;
      description = "Domain for the Caddy virtual host.";
    };
    port = lib.mkOption {
      type = lib.types.port;
      default = 3002;
    };
    cacheUrl = lib.mkOption {
      type = lib.types.str;
      description = "URL of the cache service (e.g. http://localhost:3000).";
    };
  };

  config = lib.mkIf cfg.enable {
    # Dedicated unprivileged system user/group for the service.
    users.users.noisebell-rss = {
      isSystemUser = true;
      group = "noisebell-rss";
    };
    users.groups.noisebell-rss = {};

    services.caddy.virtualHosts.${cfg.domain}.extraConfig = ''
      reverse_proxy localhost:${toString cfg.port}
    '';

    systemd.services.noisebell-rss = {
      description = "Noisebell RSS/Atom feed";
      wantedBy = [ "multi-user.target" ];
      # Start after the cache it polls; "after" only orders, it does not require.
      after = [ "network-online.target" "noisebell-cache.service" ];
      wants = [ "network-online.target" ];
      # Configuration is passed entirely through environment variables,
      # matching the env lookups in the binary's main().
      environment = {
        NOISEBELL_RSS_PORT = toString cfg.port;
        NOISEBELL_RSS_CACHE_URL = cfg.cacheUrl;
        NOISEBELL_RSS_SITE_URL = "https://${cfg.domain}";
        RUST_LOG = "info";
      };
      script = ''
        exec ${bin}
      '';
      serviceConfig = {
        Type = "simple";
        Restart = "on-failure";
        RestartSec = 5;
        User = "noisebell-rss";
        Group = "noisebell-rss";
        # Standard hardening: read-only system, no privilege escalation.
        NoNewPrivileges = true;
        ProtectSystem = "strict";
        ProtectHome = true;
        PrivateTmp = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true;
        ProtectControlGroups = true;
        RestrictSUIDSGID = true;
      };
    };
  };
}

View file

@ -1,187 +0,0 @@
use std::sync::Arc;
use anyhow::{Context, Result};
use axum::extract::State;
use axum::http::{StatusCode, header};
use axum::response::IntoResponse;
use axum::routing::get;
use axum::Router;
use noisebell_common::HistoryEntry;
use tower_http::trace::TraceLayer;
use tracing::{error, info, Level};
/// Shared state for the HTTP handlers.
struct AppState {
    // Reused HTTP client (10s timeout, configured in main()).
    client: reqwest::Client,
    // Base URL of the cache service, e.g. "http://localhost:3000".
    cache_url: String,
    // Public site URL used for the feed's self/alternate links.
    site_url: String,
}
/// Renders a Unix timestamp (seconds) as an RFC 3339 string for Atom
/// `<updated>` elements. Out-of-range timestamps fall back to the epoch;
/// a formatting failure falls back to the epoch's literal string.
fn unix_to_rfc3339(ts: u64) -> String {
    let dt = time::OffsetDateTime::from_unix_timestamp(ts as i64)
        .unwrap_or(time::OffsetDateTime::UNIX_EPOCH);
    match dt.format(&time::format_description::well_known::Rfc3339) {
        Ok(formatted) => formatted,
        Err(_) => String::from("1970-01-01T00:00:00Z"),
    }
}
/// Escapes the five XML special characters in `s`.
///
/// Improvement: the original chained five `str::replace` calls, allocating an
/// intermediate `String` per pass; this single pass allocates once and copies
/// each character at most once. Output is identical.
fn escape_xml(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            '"' => out.push_str("&quot;"),
            '\'' => out.push_str("&apos;"),
            _ => out.push(c),
        }
    }
    out
}
/// Human-readable sentence for a status string from the cache history,
/// used as the Atom entry body.
fn status_description(status: &str) -> &str {
    if status == "open" {
        "The door at Noisebridge is open."
    } else if status == "closed" {
        "The door at Noisebridge is closed."
    } else if status == "offline" {
        "The Noisebridge Pi is offline."
    } else {
        "Unknown status."
    }
}
/// Short headline for a status string, used as the Atom entry title.
fn status_title(status: &str) -> &str {
    if status == "open" {
        "Door is open"
    } else if status == "closed" {
        "Door is closed"
    } else if status == "offline" {
        "Pi is offline"
    } else {
        "Unknown"
    }
}
/// Current wall-clock time as seconds since the Unix epoch.
fn unix_now() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};

    // A pre-epoch system clock is treated as an unrecoverable bug.
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
}
/// `GET /feed` — render the cache service's `/history` as an Atom feed.
///
/// Stateless: each request fetches the full history from the cache, then
/// renders only entries from the last 7 days. Any upstream failure
/// (transport error, non-2xx, undecodable body) maps to 502 Bad Gateway.
async fn get_feed(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    let url = format!("{}/history", state.cache_url);
    // Fetch history; distinguish upstream HTTP errors from transport errors
    // only in the log message — the client sees 502 either way.
    let resp = match state.client.get(&url).send().await {
        Ok(resp) if resp.status().is_success() => resp,
        Ok(resp) => {
            error!(status = %resp.status(), "cache service returned error");
            return (StatusCode::BAD_GATEWAY, "upstream error").into_response();
        }
        Err(e) => {
            error!(error = %e, "failed to reach cache service");
            return (StatusCode::BAD_GATEWAY, "upstream unavailable").into_response();
        }
    };
    let entries: Vec<HistoryEntry> = match resp.json().await {
        Ok(entries) => entries,
        Err(e) => {
            error!(error = %e, "failed to parse cache response");
            return (StatusCode::BAD_GATEWAY, "invalid upstream response").into_response();
        }
    };
    // Feed-level <updated> is the first entry's timestamp, falling back to
    // "now" for an empty history. NOTE(review): this assumes the cache
    // returns entries newest-first — confirm against the cache service.
    let updated = entries
        .first()
        .map(|e| unix_to_rfc3339(e.timestamp))
        .unwrap_or_else(|| unix_to_rfc3339(unix_now()));
    let site_url = escape_xml(&state.site_url);
    // Build the Atom document by hand; all interpolated text is XML-escaped.
    let mut xml = format!(
        r#"<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <title>Noisebell Door Status</title>
  <link href="{site_url}/feed" rel="self"/>
  <link href="{site_url}" rel="alternate"/>
  <id>urn:noisebell:door-status</id>
  <updated>{updated}</updated>
"#,
    );
    // Only entries from the trailing 7-day window are rendered.
    let seven_days_ago = unix_now().saturating_sub(7 * 24 * 60 * 60);
    for entry in &entries {
        if entry.timestamp < seven_days_ago {
            continue;
        }
        let ts_rfc = unix_to_rfc3339(entry.timestamp);
        xml.push_str(&format!(
            r#"  <entry>
    <title>{title}</title>
    <id>urn:noisebell:event:{id}</id>
    <updated>{ts}</updated>
    <content type="text">{description}</content>
  </entry>
"#,
            title = escape_xml(status_title(&entry.status)),
            // Entry ids reuse the cache's stable history row id.
            id = entry.id,
            ts = ts_rfc,
            description = escape_xml(status_description(&entry.status)),
        ));
    }
    xml.push_str("</feed>\n");
    (
        StatusCode::OK,
        [(header::CONTENT_TYPE, "application/atom+xml; charset=utf-8")],
        xml,
    )
        .into_response()
}
// Entry point: read configuration from the environment, build the router,
// and serve until SIGTERM.
#[tokio::main]
async fn main() -> Result<()> {
    // Log filtering comes from RUST_LOG (the NixOS module sets it to "info").
    tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .init();

    // --- Configuration from environment variables ---
    // Port and site URL have defaults; the cache URL is mandatory.
    let port: u16 = std::env::var("NOISEBELL_RSS_PORT")
        .unwrap_or_else(|_| "3002".into())
        .parse()
        .context("NOISEBELL_RSS_PORT must be a valid u16")?;
    let cache_url = std::env::var("NOISEBELL_RSS_CACHE_URL")
        .context("NOISEBELL_RSS_CACHE_URL is required")?;
    let site_url = std::env::var("NOISEBELL_RSS_SITE_URL")
        .unwrap_or_else(|_| "https://rss.noisebell.extremist.software".to_string());
    info!(port, %cache_url, "starting noisebell-rss");

    // One shared client; the 10s timeout bounds every upstream /history fetch.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .context("failed to build HTTP client")?;
    let app_state = Arc::new(AppState {
        client,
        cache_url,
        site_url,
    });

    let app = Router::new()
        .route("/health", get(|| async { StatusCode::OK }))
        .route("/feed", get(get_feed))
        // Request/response logging at INFO for every route.
        .layer(
            TraceLayer::new_for_http()
                .make_span_with(tower_http::trace::DefaultMakeSpan::new().level(Level::INFO))
                .on_response(tower_http::trace::DefaultOnResponse::new().level(Level::INFO)),
        )
        .with_state(app_state);

    let listener = tokio::net::TcpListener::bind(("0.0.0.0", port))
        .await
        .context(format!("failed to bind to 0.0.0.0:{port}"))?;
    info!(port, "listening");

    // Drain in-flight requests when systemd sends SIGTERM.
    let mut sigterm = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
        .context("failed to register SIGTERM handler")?;
    axum::serve(listener, app)
        .with_graceful_shutdown(async move {
            sigterm.recv().await;
        })
        .await
        .context("server error")?;

    info!("shutdown complete");
    Ok(())
}