Initial commit: Lookbook personal collection app

Pinterest-like app for saving images, videos, quotes, and embeds.

Features:
- Go backend with PostgreSQL, SSR templates
- Console-based admin auth (login/logout via browser console)
- Item types: images, videos (ffmpeg transcoding), quotes, embeds
- Media stored as BLOBs in PostgreSQL
- OpenGraph metadata extraction for links
- Embed detection for YouTube, Vimeo, Twitter/X
- Masonry grid layout, item detail pages
- Tag system with filtering
- Refresh metadata endpoint with change warnings
- Replace media endpoint for updating item images/videos
This commit is contained in:
soup 2026-01-17 01:09:23 -05:00
commit cdcc5b5293
Signed by: soup
SSH key fingerprint: SHA256:GYxje8eQkJ6HZKzVWDdyOUF1TyDiprruGhE0Ym8qYDY
45 changed files with 4634 additions and 0 deletions

View file

@ -0,0 +1,131 @@
package migrations
import (
"context"
"database/sql"
"embed"
"fmt"
"log/slog"
"os"
_ "github.com/jackc/pgx/v5/stdlib"
"github.com/pressly/goose/v3"
)
// FS embeds the SQL migration files under sql/ into the binary; goose
// reads migrations from this filesystem after SetBaseFS in openDB.
//
//go:embed sql/*.sql
var FS embed.FS

// DefaultURL is the connection string used whenever a caller passes an
// empty database URL to Up, Down, or CheckPending.
const DefaultURL = "postgres:///lookbook?sslmode=disable"
// Up applies all available migrations using the provided database URL.
// An empty dbURL falls back to DefaultURL. The connection is opened and
// closed within this call.
func Up(ctx context.Context, dbURL string, logger *slog.Logger) error {
	if dbURL == "" {
		dbURL = DefaultURL
	}

	db, err := openDB(dbURL, logger)
	if err != nil {
		return err
	}
	defer db.Close()

	// "sql" is the directory inside the embedded FS holding migrations.
	if err := goose.UpContext(ctx, db, "sql"); err != nil {
		return fmt.Errorf("apply migrations: %w", err)
	}
	logger.Info("database migrated")
	return nil
}
// Down rolls back migrations. If targetVersion < 0, it steps back one
// migration; otherwise it migrates down to the target version. An empty
// dbURL falls back to DefaultURL.
func Down(ctx context.Context, dbURL string, targetVersion int64, logger *slog.Logger) error {
	if dbURL == "" {
		dbURL = DefaultURL
	}

	db, err := openDB(dbURL, logger)
	if err != nil {
		return err
	}
	defer db.Close()

	// Non-negative target: migrate down until that version is current.
	if targetVersion >= 0 {
		if err := goose.DownToContext(ctx, db, "sql", targetVersion); err != nil {
			return fmt.Errorf("rollback to version %d: %w", targetVersion, err)
		}
		logger.Info("rolled back to version", slog.Int64("version", targetVersion))
		return nil
	}

	// Negative target: undo just the most recent migration.
	if err := goose.DownContext(ctx, db, "sql"); err != nil {
		return fmt.Errorf("rollback one: %w", err)
	}
	logger.Info("rolled back one migration")
	return nil
}
// CheckPending returns the number of pending migrations without
// applying them. An empty dbURL falls back to DefaultURL.
func CheckPending(ctx context.Context, dbURL string, logger *slog.Logger) (int, error) {
	if dbURL == "" {
		dbURL = DefaultURL
	}

	db, err := openDB(dbURL, logger)
	if err != nil {
		return 0, err
	}
	defer db.Close()

	current, err := goose.GetDBVersionContext(ctx, db)
	if err != nil {
		return 0, fmt.Errorf("get db version: %w", err)
	}

	all, err := goose.CollectMigrations("sql", 0, goose.MaxVersion)
	if err != nil {
		return 0, fmt.Errorf("collect migrations: %w", err)
	}

	// A migration is pending when its version is newer than the
	// database's current version.
	count := 0
	for _, mig := range all {
		if mig.Version > current {
			count++
		}
	}
	return count, nil
}
// slogLogger adapts slog to goose's minimal logging interface.
type slogLogger struct {
logger *slog.Logger
}
func (l slogLogger) Printf(format string, v ...any) {
l.logger.Info(fmt.Sprintf(format, v...))
}
func (l slogLogger) Fatalf(format string, v ...any) {
l.logger.Error(fmt.Sprintf(format, v...))
os.Exit(1)
}
// openDB points goose at the embedded migration files, installs the
// slog adapter as goose's logger, opens a pgx connection to url, and
// verifies it with a ping. The caller owns the returned handle and is
// responsible for closing it.
func openDB(url string, logger *slog.Logger) (*sql.DB, error) {
	goose.SetBaseFS(FS)
	goose.SetLogger(slogLogger{logger: logger})

	db, err := sql.Open("pgx", url)
	if err != nil {
		return nil, fmt.Errorf("open db: %w", err)
	}

	// sql.Open does not establish a connection; ping to fail fast on a
	// bad URL or unreachable server.
	if pingErr := db.Ping(); pingErr != nil {
		db.Close()
		return nil, fmt.Errorf("ping db: %w", pingErr)
	}
	return db, nil
}

View file

View file

@ -0,0 +1,80 @@
-- +goose Up
-- Initial schema for the lookbook app.
-- gen_random_uuid() is built-in since PostgreSQL 13, no extension needed

-- Admin authentication (single row)
CREATE TABLE admin (
    id SERIAL PRIMARY KEY,
    password_hash BYTEA, -- NULL until first login() sets password
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Insert the single admin row
INSERT INTO admin (id) VALUES (1);

-- Sessions for admin authentication
CREATE TABLE session (
    id BIGSERIAL PRIMARY KEY,
    session_id TEXT NOT NULL UNIQUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    expires_at TIMESTAMPTZ NOT NULL
);
-- Supports efficient lookup/purge of expired sessions.
CREATE INDEX idx_session_expires_at ON session(expires_at);

-- Items (the main content)
CREATE TABLE item (
    id BIGSERIAL PRIMARY KEY,
    pub_id UUID NOT NULL DEFAULT gen_random_uuid() UNIQUE,
    title TEXT,
    description TEXT,
    link_url TEXT, -- Source URL (optional)
    item_type TEXT NOT NULL, -- 'image', 'video', 'quote', 'embed'
    embed_provider TEXT, -- 'youtube', 'vimeo', NULL
    embed_video_id TEXT, -- Video ID for embeds
    embed_html TEXT, -- Cached embed iframe HTML
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ -- Soft delete
);
CREATE INDEX idx_item_deleted ON item(deleted_at);
CREATE INDEX idx_item_created ON item(created_at DESC);
-- No separate index on pub_id: the UNIQUE constraint above already
-- creates a unique btree index, so another one would be redundant and
-- only slow down writes.

-- Media blobs (stored in DB)
CREATE TABLE media (
    id BIGSERIAL PRIMARY KEY,
    item_id BIGINT NOT NULL REFERENCES item(id) ON DELETE CASCADE,
    media_type TEXT NOT NULL, -- 'original', 'thumbnail'
    content_type TEXT NOT NULL, -- MIME type
    data BYTEA NOT NULL,
    width INT,
    height INT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_media_item_id ON media(item_id);

-- Tags
CREATE TABLE tag (
    id BIGSERIAL PRIMARY KEY,
    name TEXT NOT NULL UNIQUE
);
-- No separate index on name: the UNIQUE constraint already provides a
-- unique btree index on tag(name).

-- Item-Tag junction
CREATE TABLE item_tag (
    item_id BIGINT NOT NULL REFERENCES item(id) ON DELETE CASCADE,
    tag_id BIGINT NOT NULL REFERENCES tag(id) ON DELETE CASCADE,
    PRIMARY KEY (item_id, tag_id)
);
-- The composite PK covers (item_id, ...) lookups; this index covers
-- lookups by tag alone.
CREATE INDEX idx_item_tag_tag_id ON item_tag(tag_id);

-- +goose Down
-- Drop in reverse dependency order.
DROP TABLE IF EXISTS item_tag;
DROP TABLE IF EXISTS tag;
DROP TABLE IF EXISTS media;
DROP TABLE IF EXISTS item;
DROP TABLE IF EXISTS session;
DROP TABLE IF EXISTS admin;

View file

@ -0,0 +1,5 @@
-- +goose Up
-- Add a nullable source_url to media; presumably the URL the blob was
-- originally fetched from (NULL for pre-existing rows) — confirm
-- against the application code that writes it.
ALTER TABLE media ADD COLUMN source_url TEXT;

-- +goose Down
ALTER TABLE media DROP COLUMN source_url;