Initial lookbook implementation
Pinterest-style visual bookmarking app with:

- URL metadata extraction (OG/Twitter meta, oEmbed fallback)
- Image caching in Postgres with 480px thumbnails
- Multi-tag filtering with Ctrl/Cmd for OR mode
- Fuzzy tag suggestions and inline tag editing
- Browser console auth() with first-use password setup
- Brutalist UI with Commit Mono font and Pico CSS
- Light/dark mode via browser preference

commit fc625fb9cf
486 changed files with 195373 additions and 0 deletions
.direnv/bin/nix-direnv-reload (executable file, +19)
@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -e
if [[ ! -d "/home/soup/sx/lookbook" ]]; then
  echo "Cannot find source directory; Did you move it?"
  echo "(Looking for "/home/soup/sx/lookbook")"
  echo 'Cannot force reload with this script - use "direnv reload" manually and then try again'
  exit 1
fi

# rebuild the cache forcefully
_nix_direnv_force_reload=1 direnv exec "/home/soup/sx/lookbook" true

# Update the mtime for .envrc.
# This will cause direnv to reload again - but without re-building.
touch "/home/soup/sx/lookbook/.envrc"

# Also update the timestamp of whatever profile_rc we have.
# This makes sure that we know we are up to date.
touch -r "/home/soup/sx/lookbook/.envrc" "/home/soup/sx/lookbook/.direnv"/*.rc
@@ -0,0 +1 @@
/nix/store/fg60fb70c4ji2m0mw7zr6577g4y9fkjy-nix-shell-env

(one file diff suppressed because it is too large)
.envrc (new file, +1)
@@ -0,0 +1 @@
use nix --impure
Makefile (new file, +26)
@@ -0,0 +1,26 @@
APP := ./cmd/server
VERSION := $(shell git rev-parse --short HEAD 2>/dev/null || echo "dev")
LDFLAGS := -X git.soup.land/soup/lookbook/internal/static.Version=$(VERSION)

.PHONY: dev run migrate rollback fmt test build

dev:
	hivemind

run:
	go run $(APP) web

migrate:
	go run $(APP) -migrate

rollback:
	go run $(APP) -rollback

fmt:
	go fmt ./...

test:
	go test -v ./...

build:
	CGO_ENABLED=0 go build -ldflags "$(LDFLAGS)" -o bin/lookbook $(APP)
Procfile (new file, +1)
@@ -0,0 +1 @@
web: fd . -e go -e html -e css -e js | entr -r go run ./cmd/server web
cmd/server/main.go (new file, +168)
@@ -0,0 +1,168 @@
package main

import (
	"context"
	"database/sql"
	"flag"
	"fmt"
	"log/slog"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	_ "github.com/jackc/pgx/v5/stdlib"

	"git.soup.land/soup/lookbook/internal/handlers"
	"git.soup.land/soup/lookbook/internal/middleware"
	"git.soup.land/soup/lookbook/internal/migrations"
	"git.soup.land/soup/lookbook/internal/static"
	"git.soup.land/soup/sxgo/ssr"
)

const defaultAddr = ":8080"

func main() {
	migrate := flag.Bool("migrate", false, "run database migrations and exit")
	rollback := flag.Bool("rollback", false, "roll back migrations and exit (one step by default)")
	rollbackTarget := flag.Int64("to", -1, "target version for rollback when using -rollback")
	dbURLFlag := flag.String("db-url", "", "database connection URL")
	flag.Parse()

	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))

	dbURL := *dbURLFlag
	if dbURL == "" {
		dbURL = os.Getenv("DATABASE_URL")
	}
	if dbURL == "" {
		dbURL = migrations.DefaultURL
	}

	switch {
	case *migrate && *rollback:
		logger.Error("choose either -migrate or -rollback, not both")
		os.Exit(1)
	case *migrate:
		if err := migrations.Up(context.Background(), dbURL, logger); err != nil {
			logger.Error("migration failed", slog.Any("err", err))
			os.Exit(1)
		}
		return
	case *rollback:
		if err := migrations.Down(context.Background(), dbURL, *rollbackTarget, logger); err != nil {
			logger.Error("rollback failed", slog.Any("err", err))
			os.Exit(1)
		}
		return
	}

	// Check for subcommand
	args := flag.Args()
	if len(args) < 1 {
		printUsage()
		os.Exit(1)
	}

	mode := args[0]
	switch mode {
	case "web":
		runWebServer(dbURL, logger)
	default:
		logger.Error("unknown mode", slog.String("mode", mode))
		printUsage()
		os.Exit(1)
	}
}

func printUsage() {
	fmt.Fprintln(os.Stderr, "Usage:")
	fmt.Fprintln(os.Stderr, "  lookbook web - Run web server")
	fmt.Fprintln(os.Stderr, "")
	fmt.Fprintln(os.Stderr, "Flags:")
	fmt.Fprintln(os.Stderr, "  -migrate       Run database migrations")
	fmt.Fprintln(os.Stderr, "  -rollback      Roll back migrations")
	fmt.Fprintln(os.Stderr, "  -to <version>  Target version for rollback")
	fmt.Fprintln(os.Stderr, "  -db-url <url>  Database connection URL")
}

func runWebServer(dbURL string, logger *slog.Logger) {
	pending, err := migrations.CheckPending(context.Background(), dbURL, logger)
	if err != nil {
		logger.Warn("could not check migration status", slog.Any("err", err))
	} else if pending > 0 {
		logger.Warn("database has pending migrations",
			slog.Int("pending", pending),
			slog.String("hint", "run 'make migrate' to apply"))
	}

	db, err := sql.Open("pgx", dbURL)
	if err != nil {
		logger.Error("failed to open database", slog.Any("err", err))
		os.Exit(1)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		logger.Error("failed to ping database", slog.Any("err", err))
		os.Exit(1)
	}

	rc := &handlers.RequestContext{
		DB:        db,
		Logger:    logger,
		TmplCache: ssr.NewTmplCache(handlers.TemplateFuncs),
	}
	router := handlers.NewRouter(rc)

	router.Handle("GET /", handlers.HandleGetGallery)
	router.Handle("POST /items", handlers.HandlePostItem)
	router.Handle("GET /items/{id}", handlers.HandleGetItem)
	router.Handle("POST /items/{id}/tags", handlers.HandlePostItemTags)
	router.Handle("POST /items/{id}/delete", handlers.HandleDeleteItem)
	router.Handle("POST /items/{id}/refresh", handlers.HandleRefreshItem)
	router.Handle("GET /images/{id}", handlers.HandleGetImage)
	router.Handle("POST /auth/login", handlers.HandlePostAuthLogin)
	router.Handle("POST /auth/logout", handlers.HandlePostAuthLogout)
	router.Handle("GET /auth/status", handlers.HandleGetAuthStatus)
	router.HandleStd("GET /static/{version}/", static.Handler())

	addr := defaultAddr
	if envAddr := os.Getenv("ADDR"); envAddr != "" {
		addr = envAddr
	}

	server := &http.Server{
		Addr:              addr,
		Handler:           middleware.Logging(logger)(router),
		ReadHeaderTimeout: 5 * time.Second,
	}

	go func() {
		logger.Info("listening", slog.String("addr", addr))
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			logger.Error("http server error", slog.Any("err", err))
			os.Exit(1)
		}
	}()

	waitForShutdown(server, logger)
}

func waitForShutdown(server *http.Server, logger *slog.Logger) {
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)

	<-quit
	logger.Info("shutting down")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := server.Shutdown(ctx); err != nil {
		logger.Error("graceful shutdown failed", slog.Any("err", err))
	}
}
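
As a sketch of how the routes above fit together: assuming the server is running locally on the default :8080, a minimal Go client (made-up password and bookmark URL) can log in, keep the session cookie, and add an item.

package main

import (
	"fmt"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strings"
)

func main() {
	// The session cookie set by POST /auth/login lives in the jar.
	jar, err := cookiejar.New(nil)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Jar: jar}

	// First-ever login sets the password; later logins must match it.
	resp, err := client.Post("http://localhost:8080/auth/login",
		"application/json", strings.NewReader(`{"password":"hunter2"}`)) // hypothetical password
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// POST /items takes a form-encoded url field and redirects to the gallery.
	resp, err = client.PostForm("http://localhost:8080/items",
		url.Values{"url": {"https://example.com"}})
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.Status)
}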
default.nix (new file, +90)
@@ -0,0 +1,90 @@
{ pkgs ? import <nixpkgs> {} }:

let
  lookbook = pkgs.buildGoModule {
    pname = "lookbook";
    version = "0.1.0";
    src = ./.;

    vendorHash = "sha256-ghF1GGGQokE0x/zC+AcAneF6ExPGWYQCZ/U6slvKbA4=";

    env.GOPRIVATE = "git.soup.land";

    subPackages = [ "cmd/server" ];

    postInstall = ''
      mv $out/bin/server $out/bin/lookbook
    '';

    meta = with pkgs.lib; {
      description = "Lookbook inspiration board";
      homepage = "https://git.soup.land/soup/lookbook";
    };
  };

in
{
  package = lookbook;

  nixosModule = { config, lib, pkgs, ... }:
    let
      cfg = config.services.lookbook;
    in {
      options.services.lookbook = {
        enable = lib.mkEnableOption "lookbook service";

        address = lib.mkOption {
          type = lib.types.str;
          default = "127.0.0.1:8083";
          description = "Address to listen on";
        };

        databaseUrl = lib.mkOption {
          type = lib.types.str;
          description = "PostgreSQL connection URL";
        };

        user = lib.mkOption {
          type = lib.types.str;
          default = "lookbook";
          description = "User to run the service as";
        };

        group = lib.mkOption {
          type = lib.types.str;
          default = "lookbook";
          description = "Group to run the service as";
        };
      };

      config = lib.mkIf cfg.enable {
        users.users.${cfg.user} = {
          isSystemUser = true;
          group = cfg.group;
        };
        users.groups.${cfg.group} = {};

        systemd.services.lookbook = {
          description = "Lookbook App";
          wantedBy = [ "multi-user.target" ];
          after = [ "network.target" "postgresql.service" ];
          requires = [ "postgresql.service" ];

          environment = {
            DATABASE_URL = cfg.databaseUrl;
            ADDR = cfg.address;
          };

          serviceConfig = {
            Type = "simple";
            User = cfg.user;
            Group = cfg.group;
            ExecStartPre = "${lookbook}/bin/lookbook -migrate";
            ExecStart = "${lookbook}/bin/lookbook web";
            Restart = "always";
            RestartSec = 5;
          };
        };
      };
    };
}
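
For reference, a hypothetical NixOS configuration consuming this module could look like the sketch below; the import path and the connection URL are placeholders, not part of this commit.

{ pkgs, ... }:
let
  # Hypothetical checkout path of this repository.
  lookbook = import /path/to/lookbook/default.nix { inherit pkgs; };
in {
  imports = [ lookbook.nixosModule ];

  services.lookbook = {
    enable = true;
    # address, user, and group fall back to the defaults declared above.
    databaseUrl = "postgres://lookbook@localhost:5432/lookbook";
  };
}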
go.mod (new file, +26)
@@ -0,0 +1,26 @@
module git.soup.land/soup/lookbook

go 1.25.0

require (
	git.soup.land/soup/sxgo v0.1.1
	github.com/disintegration/imaging v1.6.2
	github.com/google/uuid v1.6.0
	github.com/jackc/pgx/v5 v5.7.6
	github.com/pressly/goose/v3 v3.26.0
	golang.org/x/crypto v0.40.0
	golang.org/x/net v0.42.0
)

require (
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
	github.com/jackc/puddle/v2 v2.2.2 // indirect
	github.com/mfridman/interpolate v0.0.2 // indirect
	github.com/sethvargo/go-retry v0.3.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	golang.org/x/image v0.32.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/text v0.30.0 // indirect
)
go.sum (new file, +70)
@@ -0,0 +1,70 @@
git.soup.land/soup/sxgo v0.1.1 h1:EIEHcb+yptNHy5kd+/YU/+Ov4kNYftDm0+El3jFFmks=
git.soup.land/soup/sxgo v0.1.1/go.mod h1:U8x8wBk6gx4j43wgT8wXX3J6o4KIz92tPCVBeytDTxM=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM=
github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ=
golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ=
modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
internal/components/page.go (new file, +51)
@@ -0,0 +1,51 @@
package components

import (
	"git.soup.land/soup/sxgo/ssr"
)

type Page struct {
	Title   string
	Content ssr.Renderable
	ShowNav bool
	HasAuth bool
}

func (p Page) Render(sw *ssr.Writer) error {
	if err := sw.Tmpl(p, `
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta name="color-scheme" content="light dark">
  <meta name="darkreader-lock">
  <title>{{.Title}} - Lookbook</title>
  <link rel="stylesheet" href="{{staticURL "css/pico.min.css"}}">
  <link rel="stylesheet" href="{{staticURL "css/styles.css"}}">
</head>
<body>
<header class="site-header">
  <div class="title">Lookbook</div>
  {{if .ShowNav}}
  <nav class="site-nav">
    <a href="/">All</a>
    <button type="button" class="ghost" onclick="auth()">Auth</button>
    <button type="button" class="ghost" onclick="logout()" {{if not .HasAuth}}hidden{{end}}>Logout</button>
  </nav>
  {{end}}
</header>
<main class="container">
`); err != nil {
		return err
	}

	if err := p.Content.Render(sw); err != nil {
		return err
	}

	return sw.Tmpl(p, `
</main>
<script>
  window.LOOKBOOK_AUTH = {{if .HasAuth}}true{{else}}false{{end}};
</script>
<script src="{{staticURL "js/app.js"}}"></script>
</body>
</html>
`)
}
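
For context, Page.Render opens the shared shell, defers to Content, then closes it; any value with a Render(*ssr.Writer) error method fits the ssr.Renderable slot. A minimal sketch of such a component, with the ssr API shape inferred from this file (notice is hypothetical):

package components

import "git.soup.land/soup/sxgo/ssr"

// notice is a hypothetical component: Page wraps its output in the
// <head>/<header>/<main> shell rendered above.
type notice struct {
	Text string
}

func (n notice) Render(sw *ssr.Writer) error {
	return sw.Tmpl(n, `<div class="notice">{{.Text}}</div>`)
}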
internal/data/auth/queries.go (new file, +45)
@@ -0,0 +1,45 @@
package auth

import (
	"context"
	"database/sql"
	"time"
)

type Row struct {
	ID           int64
	PasswordHash []byte
	CreatedAt    time.Time
}

func QGet(ctx context.Context, db *sql.DB) (*Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		SELECT id, password_hash, created_at
		FROM auth
		ORDER BY id ASC
		LIMIT 1
	`).Scan(&row.ID, &row.PasswordHash, &row.CreatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &row, nil
}

func QCreate(ctx context.Context, db *sql.DB, hash []byte) (Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		INSERT INTO auth (password_hash)
		VALUES ($1)
		RETURNING id, password_hash, created_at
	`, hash).Scan(&row.ID, &row.PasswordHash, &row.CreatedAt)
	return row, err
}

func QUpdate(ctx context.Context, db *sql.DB, id int64, hash []byte) error {
	_, err := db.ExecContext(ctx, `UPDATE auth SET password_hash = $2 WHERE id = $1`, id, hash)
	return err
}
internal/data/image/queries.go (new file, +119)
@@ -0,0 +1,119 @@
package image

import (
	"context"
	"database/sql"
	"time"
)

type Row struct {
	ID          int64
	ItemID      int64
	OriginalURL string
	ContentType string
	Bytes       []byte
	Width       int
	Height      int
	IsThumb     bool
	CreatedAt   time.Time
}

type Ref struct {
	ID      int64
	ItemID  int64
	IsThumb bool
}

func QListByItem(ctx context.Context, db *sql.DB, itemID int64) ([]Row, error) {
	rows, err := db.QueryContext(ctx, `
		SELECT id, item_id, original_url, content_type, bytes, width, height, is_thumb, created_at
		FROM image
		WHERE item_id = $1
		ORDER BY is_thumb DESC, id ASC
	`, itemID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var items []Row
	for rows.Next() {
		var row Row
		if err := rows.Scan(&row.ID, &row.ItemID, &row.OriginalURL, &row.ContentType, &row.Bytes, &row.Width, &row.Height, &row.IsThumb, &row.CreatedAt); err != nil {
			return nil, err
		}
		items = append(items, row)
	}
	return items, rows.Err()
}

func QListPrimaryRefsByItems(ctx context.Context, db *sql.DB, itemIDs []int64) (map[int64]Ref, error) {
	if len(itemIDs) == 0 {
		return map[int64]Ref{}, nil
	}

	rows, err := db.QueryContext(ctx, `
		SELECT id, item_id, is_thumb
		FROM image
		WHERE item_id = ANY($1)
		ORDER BY item_id ASC, is_thumb DESC, id ASC
	`, itemIDs)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	results := make(map[int64]Ref)
	for rows.Next() {
		var row Ref
		if err := rows.Scan(&row.ID, &row.ItemID, &row.IsThumb); err != nil {
			return nil, err
		}
		// The ORDER BY above sorts thumbnails first within each item, so the
		// first row seen per item wins as its primary image.
		if _, exists := results[row.ItemID]; !exists {
			results[row.ItemID] = row
		}
	}
	return results, rows.Err()
}

func QFindByID(ctx context.Context, db *sql.DB, id int64) (*Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		SELECT id, item_id, original_url, content_type, bytes, width, height, is_thumb, created_at
		FROM image
		WHERE id = $1
		LIMIT 1
	`, id).Scan(&row.ID, &row.ItemID, &row.OriginalURL, &row.ContentType, &row.Bytes, &row.Width, &row.Height, &row.IsThumb, &row.CreatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &row, nil
}

func QCreate(ctx context.Context, db *sql.DB, itemID int64, originalURL, contentType string, bytes []byte, width, height int, isThumb bool) (Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		INSERT INTO image (item_id, original_url, content_type, bytes, width, height, is_thumb)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING id, item_id, original_url, content_type, bytes, width, height, is_thumb, created_at
	`, itemID, originalURL, contentType, bytes, width, height, isThumb).Scan(
		&row.ID,
		&row.ItemID,
		&row.OriginalURL,
		&row.ContentType,
		&row.Bytes,
		&row.Width,
		&row.Height,
		&row.IsThumb,
		&row.CreatedAt,
	)
	return row, err
}

func QDeleteByItem(ctx context.Context, db *sql.DB, itemID int64) error {
	_, err := db.ExecContext(ctx, `DELETE FROM image WHERE item_id = $1`, itemID)
	return err
}
internal/data/item/queries.go (new file, +100)
@@ -0,0 +1,100 @@
package item

import (
	"context"
	"database/sql"
	"time"

	"github.com/google/uuid"
)

type Row struct {
	ID          int64
	PubID       uuid.UUID
	SourceURL   string
	Title       string
	Description string
	SiteName    string
	CreatedAt   time.Time
	UpdatedAt   time.Time
	DeletedAt   sql.Null[time.Time]
}

func QList(ctx context.Context, db *sql.DB) ([]Row, error) {
	rows, err := db.QueryContext(ctx, `
		SELECT id, pub_id, source_url, title, description, site_name, created_at, updated_at, deleted_at
		FROM item
		WHERE deleted_at IS NULL
		ORDER BY created_at DESC
	`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var items []Row
	for rows.Next() {
		var row Row
		if err := rows.Scan(&row.ID, &row.PubID, &row.SourceURL, &row.Title, &row.Description, &row.SiteName, &row.CreatedAt, &row.UpdatedAt, &row.DeletedAt); err != nil {
			return nil, err
		}
		items = append(items, row)
	}
	return items, rows.Err()
}

func QFindByID(ctx context.Context, db *sql.DB, id int64) (*Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		SELECT id, pub_id, source_url, title, description, site_name, created_at, updated_at, deleted_at
		FROM item
		WHERE id = $1
		LIMIT 1
	`, id).Scan(&row.ID, &row.PubID, &row.SourceURL, &row.Title, &row.Description, &row.SiteName, &row.CreatedAt, &row.UpdatedAt, &row.DeletedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &row, nil
}

func QCreate(ctx context.Context, db *sql.DB, sourceURL, title, description, siteName string) (Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		INSERT INTO item (source_url, title, description, site_name)
		VALUES ($1, $2, $3, $4)
		RETURNING id, pub_id, source_url, title, description, site_name, created_at, updated_at, deleted_at
	`, sourceURL, title, description, siteName).Scan(
		&row.ID,
		&row.PubID,
		&row.SourceURL,
		&row.Title,
		&row.Description,
		&row.SiteName,
		&row.CreatedAt,
		&row.UpdatedAt,
		&row.DeletedAt,
	)
	return row, err
}

func QUpdateMeta(ctx context.Context, db *sql.DB, id int64, title, description, siteName string) error {
	_, err := db.ExecContext(ctx, `
		UPDATE item
		SET title = $2, description = $3, site_name = $4, updated_at = NOW()
		WHERE id = $1
	`, id, title, description, siteName)
	return err
}

func QSoftDelete(ctx context.Context, db *sql.DB, id int64) error {
	_, err := db.ExecContext(ctx, `UPDATE item SET deleted_at = NOW() WHERE id = $1`, id)
	return err
}

func QRestore(ctx context.Context, db *sql.DB, id int64) error {
	_, err := db.ExecContext(ctx, `UPDATE item SET deleted_at = NULL WHERE id = $1`, id)
	return err
}
internal/data/session/queries.go (new file, +51)
@@ -0,0 +1,51 @@
package session

import (
	"context"
	"database/sql"
	"time"
)

type Row struct {
	ID        int64
	Token     string
	CreatedAt time.Time
	ExpiresAt time.Time
}

func QCreate(ctx context.Context, db *sql.DB, token string, expiresAt time.Time) (Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		INSERT INTO session (token, expires_at)
		VALUES ($1, $2)
		RETURNING id, token, created_at, expires_at
	`, token, expiresAt).Scan(&row.ID, &row.Token, &row.CreatedAt, &row.ExpiresAt)
	return row, err
}

func QFindByToken(ctx context.Context, db *sql.DB, token string) (*Row, error) {
	var row Row
	err := db.QueryRowContext(ctx, `
		SELECT id, token, created_at, expires_at
		FROM session
		WHERE token = $1
		LIMIT 1
	`, token).Scan(&row.ID, &row.Token, &row.CreatedAt, &row.ExpiresAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &row, nil
}

func QDelete(ctx context.Context, db *sql.DB, token string) error {
	_, err := db.ExecContext(ctx, `DELETE FROM session WHERE token = $1`, token)
	return err
}

func QDeleteExpired(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, `DELETE FROM session WHERE expires_at < NOW()`)
	return err
}
internal/data/tag/queries.go (new file, +141)
@@ -0,0 +1,141 @@
package tag

import (
	"context"
	"database/sql"
	"strings"
	"time"
)

type Row struct {
	ID        int64
	Name      string
	CreatedAt time.Time
}

type ItemTag struct {
	ItemID int64
	TagID  int64
	Name   string
}

func QList(ctx context.Context, db *sql.DB) ([]Row, error) {
	rows, err := db.QueryContext(ctx, `
		SELECT id, name, created_at
		FROM tag
		ORDER BY name ASC
	`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var tags []Row
	for rows.Next() {
		var row Row
		if err := rows.Scan(&row.ID, &row.Name, &row.CreatedAt); err != nil {
			return nil, err
		}
		tags = append(tags, row)
	}
	return tags, rows.Err()
}

func QListByItem(ctx context.Context, db *sql.DB, itemID int64) ([]Row, error) {
	rows, err := db.QueryContext(ctx, `
		SELECT t.id, t.name, t.created_at
		FROM tag t
		JOIN item_tag it ON it.tag_id = t.id
		WHERE it.item_id = $1
		ORDER BY t.name ASC
	`, itemID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var tags []Row
	for rows.Next() {
		var row Row
		if err := rows.Scan(&row.ID, &row.Name, &row.CreatedAt); err != nil {
			return nil, err
		}
		tags = append(tags, row)
	}
	return tags, rows.Err()
}

func QListItemTags(ctx context.Context, db *sql.DB) ([]ItemTag, error) {
	rows, err := db.QueryContext(ctx, `
		SELECT it.item_id, t.id, t.name
		FROM item_tag it
		JOIN tag t ON t.id = it.tag_id
		ORDER BY t.name ASC
	`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var entries []ItemTag
	for rows.Next() {
		var row ItemTag
		if err := rows.Scan(&row.ItemID, &row.TagID, &row.Name); err != nil {
			return nil, err
		}
		entries = append(entries, row)
	}
	return entries, rows.Err()
}

func QUpsert(ctx context.Context, db *sql.DB, name string) (Row, error) {
	cleaned := strings.TrimSpace(strings.ToLower(name))
	var row Row
	err := db.QueryRowContext(ctx, `
		INSERT INTO tag (name)
		VALUES ($1)
		ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
		RETURNING id, name, created_at
	`, cleaned).Scan(&row.ID, &row.Name, &row.CreatedAt)
	return row, err
}

func QReplaceItemTags(ctx context.Context, db *sql.DB, itemID int64, names []string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	_, err = tx.ExecContext(ctx, `DELETE FROM item_tag WHERE item_id = $1`, itemID)
	if err != nil {
		return err
	}

	for _, name := range names {
		if strings.TrimSpace(name) == "" {
			continue
		}
		var tagID int64
		err = tx.QueryRowContext(ctx, `
			INSERT INTO tag (name)
			VALUES ($1)
			ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
			RETURNING id
		`, strings.TrimSpace(strings.ToLower(name))).Scan(&tagID)
		if err != nil {
			return err
		}

		_, err = tx.ExecContext(ctx, `
			INSERT INTO item_tag (item_id, tag_id)
			VALUES ($1, $2)
			ON CONFLICT DO NOTHING
		`, itemID, tagID)
		if err != nil {
			return err
		}
	}

	return tx.Commit()
}
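
A hedged usage note on QReplaceItemTags: it deletes and re-inserts inside one transaction and normalizes names on the way in, so callers can hand it raw comma-split input. A sketch (the connection URL and item ID are placeholders):

package main

import (
	"context"
	"database/sql"

	_ "github.com/jackc/pgx/v5/stdlib"

	"git.soup.land/soup/lookbook/internal/data/tag"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:postgres@localhost:5432/lookbook?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// " Art ", "design", and "" collapse to the tags "art" and "design"
	// on hypothetical item 42; the blank entry is skipped.
	if err := tag.QReplaceItemTags(context.Background(), db, 42, []string{" Art ", "design", ""}); err != nil {
		panic(err)
	}
}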
internal/handlers/auth.go (new file, +167)
@@ -0,0 +1,167 @@
package handlers

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/crypto/bcrypt"

	"git.soup.land/soup/lookbook/internal/data/auth"
	"git.soup.land/soup/lookbook/internal/data/session"
)

const sessionCookieName = "lookbook_session"
const sessionDuration = 30 * 24 * time.Hour

type authRequest struct {
	Password string `json:"password"`
}

type authStatus struct {
	Authenticated bool `json:"authenticated"`
	HasPassword   bool `json:"has_password"`
}

func HandlePostAuthLogin(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	var req authRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request", http.StatusBadRequest)
		return nil
	}
	if req.Password == "" {
		http.Error(w, "Password required", http.StatusBadRequest)
		return nil
	}

	existing, err := auth.QGet(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("get auth: %w", err)
	}

	if existing == nil {
		hash, err := bcrypt.GenerateFromPassword([]byte(req.Password), bcrypt.DefaultCost)
		if err != nil {
			return fmt.Errorf("hash password: %w", err)
		}
		if _, err := auth.QCreate(r.Context(), rc.DB, hash); err != nil {
			return fmt.Errorf("create password: %w", err)
		}
	} else {
		if err := bcrypt.CompareHashAndPassword(existing.PasswordHash, []byte(req.Password)); err != nil {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return nil
		}
	}

	token, err := newToken(32)
	if err != nil {
		return fmt.Errorf("token: %w", err)
	}

	expiresAt := time.Now().Add(sessionDuration)
	if _, err := session.QCreate(r.Context(), rc.DB, token, expiresAt); err != nil {
		return fmt.Errorf("create session: %w", err)
	}

	setSessionCookie(w, r, token, expiresAt)
	w.WriteHeader(http.StatusNoContent)
	return nil
}

func HandlePostAuthLogout(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	cookie, err := r.Cookie(sessionCookieName)
	if err == nil {
		_ = session.QDelete(r.Context(), rc.DB, cookie.Value)
	}
	clearSessionCookie(w, r)
	w.WriteHeader(http.StatusNoContent)
	return nil
}

func HandleGetAuthStatus(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	status := authStatus{}

	existing, err := auth.QGet(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("get auth: %w", err)
	}
	status.HasPassword = existing != nil

	authed, err := isAuthenticated(r, rc)
	if err != nil {
		return fmt.Errorf("auth status: %w", err)
	}
	status.Authenticated = authed

	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(status)
}

func isAuthenticated(r *http.Request, rc *RequestContext) (bool, error) {
	cookie, err := r.Cookie(sessionCookieName)
	if err != nil {
		return false, nil
	}

	row, err := session.QFindByToken(r.Context(), rc.DB, cookie.Value)
	if err != nil {
		return false, err
	}
	if row == nil {
		return false, nil
	}
	if time.Now().After(row.ExpiresAt) {
		_ = session.QDelete(r.Context(), rc.DB, cookie.Value)
		return false, nil
	}
	return true, nil
}

func requireAuth(w http.ResponseWriter, r *http.Request, rc *RequestContext) bool {
	authed, err := isAuthenticated(r, rc)
	if err != nil {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return false
	}
	if !authed {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return false
	}
	return true
}

func setSessionCookie(w http.ResponseWriter, r *http.Request, token string, expiresAt time.Time) {
	http.SetCookie(w, &http.Cookie{
		Name:     sessionCookieName,
		Value:    token,
		Path:     "/",
		HttpOnly: true,
		Secure:   r.TLS != nil,
		SameSite: http.SameSiteLaxMode,
		Expires:  expiresAt,
	})
}

func clearSessionCookie(w http.ResponseWriter, r *http.Request) {
	http.SetCookie(w, &http.Cookie{
		Name:     sessionCookieName,
		Value:    "",
		Path:     "/",
		HttpOnly: true,
		Secure:   r.TLS != nil,
		SameSite: http.SameSiteLaxMode,
		Expires:  time.Unix(0, 0),
	})
}

func newToken(length int) (string, error) {
	buf := make([]byte, length)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(buf), nil
}
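
The first-use setup in HandlePostAuthLogin means whatever password arrives first becomes the password. The bcrypt half of that flow, isolated as a runnable sketch (the password is made up):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// First login, no stored hash yet: hash and store the submitted password.
	hash, err := bcrypt.GenerateFromPassword([]byte("hunter2"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// Later logins compare the submitted password against the stored hash.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("hunter2"))) // <nil>
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong")))   // mismatch error
}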
internal/handlers/context.go (new file, +15)
@@ -0,0 +1,15 @@
package handlers

import (
	"database/sql"
	"log/slog"

	"git.soup.land/soup/sxgo/ssr"
)

// RequestContext holds dependencies that are injected into every request handler.
type RequestContext struct {
	DB        *sql.DB
	Logger    *slog.Logger
	TmplCache *ssr.TmplCache
}
internal/handlers/gallery.go (new file, +145)
@@ -0,0 +1,145 @@
package handlers

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"git.soup.land/soup/lookbook/internal/components"
	"git.soup.land/soup/lookbook/internal/data/image"
	"git.soup.land/soup/lookbook/internal/data/item"
	"git.soup.land/soup/lookbook/internal/data/tag"
	"git.soup.land/soup/sxgo/ssr"
)

type galleryPageData struct {
	Items    []galleryItem
	Tags     []tag.Row
	HasAuth  bool
	TagNames []string
}

type galleryItem struct {
	Item        item.Row
	Thumb       *image.Ref
	TagNames    []string
	TagListText string
}

func (g galleryPageData) Render(sw *ssr.Writer) error {
	return sw.Tmpl(g, `
<section class="hero">
  <form method="POST" action="/items" data-auth-required {{if not .HasAuth}}hidden{{end}}>
    <input type="url" name="url" placeholder="Paste a link" required>
    <button type="submit">Add</button>
  </form>
  <div class="notice" data-auth-status>Read only</div>
</section>

<section class="filters">
  {{range .Tags}}
  <button type="button" class="filter-pill" data-tag-filter="{{.Name}}">{{.Name}}</button>
  {{end}}
  <div class="tag-suggestions" data-tag-suggestions data-tags='{{json .TagNames}}'></div>
</section>

<section class="gallery">
  {{range .Items}}
  <div class="card" data-item-tags="{{.TagListText}}">
    <a href="/items/{{.Item.ID}}">
      {{if .Thumb}}
      <img src="/images/{{.Thumb.ID}}" alt="{{.Item.Title}}">
      {{else}}
      <div class="placeholder">{{if .Item.Title}}{{.Item.Title}}{{else}}{{.Item.SourceURL}}{{end}}</div>
      {{end}}
      <div class="overlay">
        <div class="title">{{if .Item.Title}}{{.Item.Title}}{{else}}{{.Item.SourceURL}}{{end}}</div>
        <div class="tags">{{range $i, $tag := .TagNames}}{{if $i}} · {{end}}{{$tag}}{{end}}</div>
      </div>
    </a>
  </div>
  {{end}}
</section>
`)
}

func HandleGetGallery(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	items, err := item.QList(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("list items: %w", err)
	}

	var itemIDs []int64
	for _, row := range items {
		itemIDs = append(itemIDs, row.ID)
	}

	thumbs, err := image.QListPrimaryRefsByItems(r.Context(), rc.DB, itemIDs)
	if err != nil {
		return fmt.Errorf("list thumbs: %w", err)
	}

	entries, err := tag.QListItemTags(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("list tags: %w", err)
	}

	itemTagNames := make(map[int64][]string)
	for _, entry := range entries {
		itemTagNames[entry.ItemID] = append(itemTagNames[entry.ItemID], entry.Name)
	}

	var cards []galleryItem
	for _, row := range items {
		tagNames := itemTagNames[row.ID]
		cards = append(cards, galleryItem{
			Item:        row,
			Thumb:       refOrNil(thumbs[row.ID]),
			TagNames:    tagNames,
			TagListText: strings.Join(tagNames, ","),
		})
	}

	tags, err := tag.QList(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("list tags: %w", err)
	}

	authed, err := isAuthenticated(r, rc)
	if err != nil {
		return fmt.Errorf("auth status: %w", err)
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	sw := ssr.NewWriter(w, rc.TmplCache)

	var tagNames []string
	for _, row := range tags {
		tagNames = append(tagNames, row.Name)
	}

	data := galleryPageData{
		Items:    cards,
		Tags:     tags,
		HasAuth:  authed,
		TagNames: tagNames,
	}
	return components.Page{
		Title:   "Gallery",
		Content: data,
		ShowNav: true,
		HasAuth: authed,
	}.Render(sw)
}

func refOrNil(ref image.Ref) *image.Ref {
	if ref.ID == 0 {
		return nil
	}
	return &ref
}

func parseID(value string) (int64, error) {
	return strconv.ParseInt(value, 10, 64)
}
internal/handlers/handler.go (new file, +49)
@@ -0,0 +1,49 @@
package handlers

import (
	"log/slog"
	"net/http"
)

// Handler is a function that handles HTTP requests and can return an error.
// It receives a RequestContext with injected dependencies (DB, Logger).
type Handler func(rc *RequestContext, w http.ResponseWriter, r *http.Request) error

// WithErrorHandling wraps a Handler to automatically handle errors.
func WithErrorHandling(rc *RequestContext, h Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		wrapper := &responseWrapper{
			ResponseWriter: w,
			written:        false,
		}

		err := h(rc, wrapper, r)
		if err != nil {
			rc.Logger.Error("handler error",
				slog.String("method", r.Method),
				slog.String("path", r.URL.Path),
				slog.Any("error", err),
			)

			if !wrapper.written {
				http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			}
		}
	}
}

// responseWrapper tracks if response was written
type responseWrapper struct {
	http.ResponseWriter
	written bool
}

func (w *responseWrapper) Write(b []byte) (int, error) {
	w.written = true
	return w.ResponseWriter.Write(b)
}

func (w *responseWrapper) WriteHeader(statusCode int) {
	w.written = true
	w.ResponseWriter.WriteHeader(statusCode)
}
internal/handlers/image.go (new file, +32)
@@ -0,0 +1,32 @@
package handlers

import (
	"fmt"
	"net/http"

	"git.soup.land/soup/lookbook/internal/data/image"
)

func HandleGetImage(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	id, err := parseID(r.PathValue("id"))
	if err != nil {
		http.Error(w, "Invalid ID", http.StatusBadRequest)
		return nil
	}

	row, err := image.QFindByID(r.Context(), rc.DB, id)
	if err != nil {
		return fmt.Errorf("find image: %w", err)
	}
	if row == nil {
		http.NotFound(w, r)
		return nil
	}

	if row.ContentType != "" {
		w.Header().Set("Content-Type", row.ContentType)
	}
	w.Header().Set("Cache-Control", "public, max-age=604800")
	_, _ = w.Write(row.Bytes)
	return nil
}
internal/handlers/item.go (new file, +245)
@@ -0,0 +1,245 @@
package handlers

import (
	"fmt"
	"net/http"
	"strings"

	"git.soup.land/soup/lookbook/internal/components"
	"git.soup.land/soup/lookbook/internal/data/image"
	"git.soup.land/soup/lookbook/internal/data/item"
	"git.soup.land/soup/lookbook/internal/data/tag"
	"git.soup.land/soup/lookbook/internal/services"
	"git.soup.land/soup/sxgo/ssr"
)

type itemPageData struct {
	Item        item.Row
	Images      []image.Row
	TagNames    []string
	AllTagNames []string
	HasAuth     bool
}

func (d itemPageData) Render(sw *ssr.Writer) error {
	return sw.Tmpl(d, `
<section class="detail">
  <div>
    {{if .Images}}
    <img src="/images/{{(index .Images 0).ID}}" alt="{{.Item.Title}}">
    {{else}}
    <div class="notice">No preview image cached.</div>
    {{end}}
  </div>
  <div class="meta">
    <h1>{{if .Item.Title}}{{.Item.Title}}{{else}}{{.Item.SourceURL}}{{end}}</h1>
    {{if .Item.SiteName}}<div>{{.Item.SiteName}}</div>{{end}}
    {{if .Item.Description}}<p>{{.Item.Description}}</p>{{end}}
    <a href="{{.Item.SourceURL}}" target="_blank" rel="noreferrer">{{.Item.SourceURL}}</a>

    <div>
      <strong>Tags</strong>
      <div class="tag-list" data-tag-list>
        {{range .TagNames}}
        <span class="tag-chip">{{.}}</span>
        {{else}}
        <span class="tag-chip">No tags</span>
        {{end}}
      </div>
      <button type="button" class="ghost" data-tag-toggle data-auth-required {{if not .HasAuth}}hidden{{end}}>Edit tags</button>
    </div>

    <div class="tag-editor" data-tag-editor data-auth-required hidden>
      <form method="POST" action="/items/{{.Item.ID}}/tags" data-tag-form>
        <input type="text" name="tags" value="{{range $i, $tag := .TagNames}}{{if $i}}, {{end}}{{$tag}}{{end}}" data-tag-input>
        <button type="submit">Save tags</button>
      </form>
      <div class="tag-suggestions" data-tag-suggestions data-tags='{{json .AllTagNames}}'></div>
    </div>

    <div class="actions" data-auth-required {{if not .HasAuth}}hidden{{end}}>
      <form method="POST" action="/items/{{.Item.ID}}/refresh">
        <button type="submit">Refresh metadata</button>
      </form>
      <form method="POST" action="/items/{{.Item.ID}}/delete">
        <button type="submit">Delete</button>
      </form>
    </div>
  </div>
</section>
`)
}

func HandleGetItem(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	id, err := parseID(r.PathValue("id"))
	if err != nil {
		http.Error(w, "Invalid ID", http.StatusBadRequest)
		return nil
	}

	row, err := item.QFindByID(r.Context(), rc.DB, id)
	if err != nil {
		return fmt.Errorf("find item: %w", err)
	}
	if row == nil {
		http.NotFound(w, r)
		return nil
	}

	images, err := image.QListByItem(r.Context(), rc.DB, id)
	if err != nil {
		return fmt.Errorf("list images: %w", err)
	}

	tags, err := tag.QListByItem(r.Context(), rc.DB, id)
	if err != nil {
		return fmt.Errorf("list item tags: %w", err)
	}

	allTags, err := tag.QList(r.Context(), rc.DB)
	if err != nil {
		return fmt.Errorf("list tags: %w", err)
	}

	var tagNames []string
	for _, row := range tags {
		tagNames = append(tagNames, row.Name)
	}

	var allTagNames []string
	for _, row := range allTags {
		allTagNames = append(allTagNames, row.Name)
	}

	authed, err := isAuthenticated(r, rc)
	if err != nil {
		return fmt.Errorf("auth status: %w", err)
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	sw := ssr.NewWriter(w, rc.TmplCache)

	data := itemPageData{
		Item:        *row,
		Images:      images,
		TagNames:    tagNames,
		AllTagNames: allTagNames,
		HasAuth:     authed,
	}

	return components.Page{
		Title:   "Item",
		Content: data,
		ShowNav: true,
		HasAuth: authed,
	}.Render(sw)
}

func HandlePostItem(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	if !requireAuth(w, r, rc) {
		return nil
	}

	if err := r.ParseForm(); err != nil {
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return nil
	}

	sourceURL := strings.TrimSpace(r.FormValue("url"))
	if sourceURL == "" {
		http.Error(w, "URL required", http.StatusBadRequest)
		return nil
	}

	_, err := services.CreateItemFromURL(r.Context(), rc.DB, sourceURL)
	if err != nil {
		return fmt.Errorf("create item: %w", err)
	}

	http.Redirect(w, r, "/", http.StatusSeeOther)
	return nil
}

func HandlePostItemTags(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	if !requireAuth(w, r, rc) {
		return nil
	}

	id, err := parseID(r.PathValue("id"))
	if err != nil {
		http.Error(w, "Invalid ID", http.StatusBadRequest)
		return nil
	}

	if err := r.ParseForm(); err != nil {
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return nil
	}

	tags := parseTags(r.FormValue("tags"))
	if err := tag.QReplaceItemTags(r.Context(), rc.DB, id, tags); err != nil {
		return fmt.Errorf("update tags: %w", err)
	}

	w.WriteHeader(http.StatusNoContent)
	return nil
}

func HandleDeleteItem(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	if !requireAuth(w, r, rc) {
		return nil
	}

	id, err := parseID(r.PathValue("id"))
	if err != nil {
		http.Error(w, "Invalid ID", http.StatusBadRequest)
		return nil
	}

	if err := item.QSoftDelete(r.Context(), rc.DB, id); err != nil {
		return fmt.Errorf("delete item: %w", err)
	}

	http.Redirect(w, r, "/", http.StatusSeeOther)
	return nil
}

func HandleRefreshItem(rc *RequestContext, w http.ResponseWriter, r *http.Request) error {
	if !requireAuth(w, r, rc) {
		return nil
	}

	id, err := parseID(r.PathValue("id"))
	if err != nil {
		http.Error(w, "Invalid ID", http.StatusBadRequest)
		return nil
	}

	row, err := item.QFindByID(r.Context(), rc.DB, id)
	if err != nil {
		return fmt.Errorf("find item: %w", err)
	}
	if row == nil {
		http.NotFound(w, r)
		return nil
	}

	if err := services.RefreshItemFromURL(r.Context(), rc.DB, *row); err != nil {
		return fmt.Errorf("refresh item: %w", err)
	}

	http.Redirect(w, r, fmt.Sprintf("/items/%d", id), http.StatusSeeOther)
	return nil
}

func parseTags(value string) []string {
	var tags []string
	for _, tagValue := range strings.Split(value, ",") {
		clean := strings.TrimSpace(tagValue)
		if clean == "" {
			continue
		}
		tags = append(tags, clean)
	}
	return tags
}
internal/handlers/router.go (new file, +42)
@@ -0,0 +1,42 @@
package handlers

import (
	"net/http"
)

// Router wraps http.ServeMux and automatically injects RequestContext into handlers.
type Router struct {
	mux *http.ServeMux
	rc  *RequestContext
}

func NewRouter(rc *RequestContext) *Router {
	return &Router{
		mux: http.NewServeMux(),
		rc:  rc,
	}
}

// Handle registers a handler that returns an error.
// The RequestContext is automatically injected and error handling is applied.
func (rt *Router) Handle(pattern string, h Handler) {
	rt.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		rc := &RequestContext{
			DB:        rt.rc.DB,
			Logger:    rt.rc.Logger,
			TmplCache: rt.rc.TmplCache,
		}

		handler := WithErrorHandling(rc, h)
		handler(w, r)
	})
}

// HandleStd registers a standard http.Handler (for static files, etc.)
func (rt *Router) HandleStd(pattern string, h http.Handler) {
	rt.mux.Handle(pattern, h)
}

func (rt *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	rt.mux.ServeHTTP(w, r)
}
internal/handlers/templates.go (new file, +23)
@@ -0,0 +1,23 @@
package handlers

import (
	"encoding/json"
	"html/template"
	"strings"

	"git.soup.land/soup/lookbook/internal/static"
)

var TemplateFuncs = template.FuncMap{
	"staticURL": static.VersionedPath,
	"json":      jsonTemplate,
}

// jsonTemplate marshals v for embedding in inline scripts and attributes.
// json.Marshal already escapes <, >, and & as \u003c-style sequences; the
// ReplaceAll below is an extra guard against a literal "</" (e.g. "</script>")
// ever reaching the page.
func jsonTemplate(v any) template.JS {
	payload, err := json.Marshal(v)
	if err != nil {
		return template.JS("null")
	}
	safe := strings.ReplaceAll(string(payload), "</", "<\\/")
	return template.JS(safe)
}
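
A small runnable check of what the json template helper emits. Because encoding/json escapes <, >, and & on its own, the ReplaceAll in jsonTemplate never fires on Marshal output; it is belt-and-braces.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Same two steps as jsonTemplate above, on a worst-case input.
	payload, _ := json.Marshal([]string{"</script>", "art & design"})
	safe := strings.ReplaceAll(string(payload), "</", "<\\/")
	fmt.Println(safe)
	// Prints: ["\u003c/script\u003e","art \u0026 design"]
}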
44
internal/middleware/logger.go
Normal file
@@ -0,0 +1,44 @@
package middleware

import (
	"log/slog"
	"net/http"
	"time"
)

// Logging emits structured request logs.
func Logging(logger *slog.Logger) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			ww := &responseWriter{ResponseWriter: w, status: http.StatusOK}

			next.ServeHTTP(ww, r)

			logger.LogAttrs(r.Context(), slog.LevelInfo, "request",
				slog.String("method", r.Method),
				slog.String("path", r.URL.Path),
				slog.Int("status", ww.status),
				slog.Int("bytes", ww.bytes),
				slog.Duration("latency", time.Since(start)),
			)
		})
	}
}

type responseWriter struct {
	http.ResponseWriter
	status int
	bytes  int
}

func (w *responseWriter) WriteHeader(statusCode int) {
	w.status = statusCode
	w.ResponseWriter.WriteHeader(statusCode)
}

func (w *responseWriter) Write(b []byte) (int, error) {
	n, err := w.ResponseWriter.Write(b)
	w.bytes += n
	return n, err
}
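A minimal sketch of the middleware in use, wrapping a mux so every request emits one structured log line with method, path, status, bytes written, and latency:

```go
package main

import (
	"log/slog"
	"net/http"
	"os"

	"git.soup.land/soup/lookbook/internal/middleware"
)

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok")) // counted by the responseWriter wrapper
	})

	// Logging returns a standard func(http.Handler) http.Handler, so it
	// composes with any other middleware of the same shape.
	http.ListenAndServe(":8080", middleware.Logging(logger)(mux))
}
```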
130
internal/migrations/migrations.go
Normal file
@@ -0,0 +1,130 @@
package migrations

import (
	"context"
	"database/sql"
	"embed"
	"fmt"
	"log/slog"
	"os"

	_ "github.com/jackc/pgx/v5/stdlib"
	"github.com/pressly/goose/v3"
)

//go:embed sql/*.sql
var FS embed.FS

const DefaultURL = "postgres://postgres:postgres@localhost:5432/lookbook?sslmode=disable"

// Up applies all available migrations using the provided database URL.
func Up(ctx context.Context, dbURL string, logger *slog.Logger) error {
	url := dbURL
	if url == "" {
		url = DefaultURL
	}

	db, err := openDB(url, logger)
	if err != nil {
		return err
	}
	defer db.Close()

	if err := goose.UpContext(ctx, db, "sql"); err != nil {
		return fmt.Errorf("apply migrations: %w", err)
	}

	logger.Info("database migrated")
	return nil
}

// Down rolls back migrations. If targetVersion < 0, it steps back one migration; otherwise it migrates down to the target version.
func Down(ctx context.Context, dbURL string, targetVersion int64, logger *slog.Logger) error {
	url := dbURL
	if url == "" {
		url = DefaultURL
	}

	db, err := openDB(url, logger)
	if err != nil {
		return err
	}
	defer db.Close()

	if targetVersion < 0 {
		if err := goose.DownContext(ctx, db, "sql"); err != nil {
			return fmt.Errorf("rollback one: %w", err)
		}
		logger.Info("rolled back one migration")
		return nil
	}

	if err := goose.DownToContext(ctx, db, "sql", targetVersion); err != nil {
		return fmt.Errorf("rollback to version %d: %w", targetVersion, err)
	}
	logger.Info("rolled back to version", slog.Int64("version", targetVersion))
	return nil
}

func CheckPending(ctx context.Context, dbURL string, logger *slog.Logger) (int, error) {
	url := dbURL
	if url == "" {
		url = DefaultURL
	}

	db, err := openDB(url, logger)
	if err != nil {
		return 0, err
	}
	defer db.Close()

	current, err := goose.GetDBVersionContext(ctx, db)
	if err != nil {
		return 0, fmt.Errorf("get db version: %w", err)
	}

	migrations, err := goose.CollectMigrations("sql", 0, goose.MaxVersion)
	if err != nil {
		return 0, fmt.Errorf("collect migrations: %w", err)
	}

	pending := 0
	for _, m := range migrations {
		if m.Version > current {
			pending++
		}
	}

	return pending, nil
}

// slogLogger adapts slog to goose's minimal logging interface.
type slogLogger struct {
	logger *slog.Logger
}

func (l slogLogger) Printf(format string, v ...any) {
	l.logger.Info(fmt.Sprintf(format, v...))
}

func (l slogLogger) Fatalf(format string, v ...any) {
	l.logger.Error(fmt.Sprintf(format, v...))
	os.Exit(1)
}

func openDB(url string, logger *slog.Logger) (*sql.DB, error) {
	goose.SetBaseFS(FS)
	goose.SetLogger(slogLogger{logger: logger})

	db, err := sql.Open("pgx", url)
	if err != nil {
		return nil, fmt.Errorf("open db: %w", err)
	}

	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("ping db: %w", err)
	}

	return db, nil
}
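A minimal sketch of a startup guard built on `CheckPending`; the warning text is illustrative, not from the commit:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"git.soup.land/soup/lookbook/internal/migrations"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Passing "" falls back to migrations.DefaultURL.
	pending, err := migrations.CheckPending(context.Background(), "", logger)
	if err != nil {
		logger.Error("migration check failed", "err", err)
		os.Exit(1)
	}
	if pending > 0 {
		logger.Warn("schema out of date; run with -migrate", "pending", pending)
	}
}
```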
73
internal/migrations/sql/0001_init.sql
Normal file
@@ -0,0 +1,73 @@
-- +goose Up
CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE TABLE item (
    id BIGSERIAL PRIMARY KEY,
    pub_id UUID NOT NULL DEFAULT gen_random_uuid(),
    source_url TEXT NOT NULL,
    title TEXT NOT NULL DEFAULT '',
    description TEXT NOT NULL DEFAULT '',
    site_name TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ,
    UNIQUE (pub_id)
);

CREATE INDEX idx_item_created_at ON item(created_at DESC);
CREATE INDEX idx_item_deleted_at ON item(deleted_at);

CREATE TABLE image (
    id BIGSERIAL PRIMARY KEY,
    item_id BIGINT NOT NULL REFERENCES item(id) ON DELETE CASCADE,
    original_url TEXT NOT NULL DEFAULT '',
    content_type TEXT NOT NULL DEFAULT '',
    bytes BYTEA NOT NULL,
    width INT NOT NULL DEFAULT 0,
    height INT NOT NULL DEFAULT 0,
    is_thumb BOOLEAN NOT NULL DEFAULT FALSE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_image_item ON image(item_id);
CREATE INDEX idx_image_thumb ON image(item_id, is_thumb);

CREATE TABLE tag (
    id BIGSERIAL PRIMARY KEY,
    name TEXT NOT NULL UNIQUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE item_tag (
    item_id BIGINT NOT NULL REFERENCES item(id) ON DELETE CASCADE,
    tag_id BIGINT NOT NULL REFERENCES tag(id) ON DELETE CASCADE,
    PRIMARY KEY (item_id, tag_id)
);

CREATE INDEX idx_item_tag_item ON item_tag(item_id);
CREATE INDEX idx_item_tag_tag ON item_tag(tag_id);

CREATE TABLE auth (
    id BIGSERIAL PRIMARY KEY,
    password_hash BYTEA NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE TABLE session (
    id BIGSERIAL PRIMARY KEY,
    token TEXT NOT NULL UNIQUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    expires_at TIMESTAMPTZ NOT NULL
);

CREATE INDEX idx_session_token ON session(token);
CREATE INDEX idx_session_expires_at ON session(expires_at);

-- +goose Down
DROP TABLE IF EXISTS session;
DROP TABLE IF EXISTS auth;
DROP TABLE IF EXISTS item_tag;
DROP TABLE IF EXISTS tag;
DROP TABLE IF EXISTS image;
DROP TABLE IF EXISTS item;
DROP EXTENSION IF EXISTS pgcrypto;
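The `item_tag` join table is what backs OR-mode tag filtering. A hypothetical helper (not part of this commit) sketching that query in Go, assuming pgx's database/sql driver encodes a `[]string` argument as a Postgres `text[]`:

```go
package data

import (
	"context"
	"database/sql"
	"fmt"
)

// itemIDsWithAnyTag returns items carrying at least one of the selected
// tags, i.e. OR semantics over the item_tag join table above.
func itemIDsWithAnyTag(ctx context.Context, db *sql.DB, tags []string) ([]int64, error) {
	// = ANY($1) matches any selected tag name; DISTINCT collapses items
	// that carry several of them.
	rows, err := db.QueryContext(ctx, `
		SELECT DISTINCT it.item_id
		FROM item_tag it
		JOIN tag t ON t.id = it.tag_id
		WHERE t.name = ANY($1)`, tags)
	if err != nil {
		return nil, fmt.Errorf("items by tags: %w", err)
	}
	defer rows.Close()

	var ids []int64
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, rows.Err()
}
```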
288
internal/services/metadata.go
Normal file
@@ -0,0 +1,288 @@
package services

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/disintegration/imaging"
	"golang.org/x/net/html"

	"git.soup.land/soup/lookbook/internal/data/image"
	"git.soup.land/soup/lookbook/internal/data/item"
)

const thumbWidth = 480

func CreateItemFromURL(ctx context.Context, db *sql.DB, sourceURL string) (item.Row, error) {
	meta, err := FetchMetadata(ctx, sourceURL)
	if err != nil {
		return item.Row{}, err
	}

	row, err := item.QCreate(ctx, db, sourceURL, meta.Title, meta.Description, meta.SiteName)
	if err != nil {
		return item.Row{}, err
	}

	if err := storeImages(ctx, db, row.ID, meta); err != nil {
		return row, err
	}

	return row, nil
}

func RefreshItemFromURL(ctx context.Context, db *sql.DB, row item.Row) error {
	meta, err := FetchMetadata(ctx, row.SourceURL)
	if err != nil {
		return err
	}

	if err := item.QUpdateMeta(ctx, db, row.ID, meta.Title, meta.Description, meta.SiteName); err != nil {
		return err
	}

	if err := image.QDeleteByItem(ctx, db, row.ID); err != nil {
		return err
	}

	return storeImages(ctx, db, row.ID, meta)
}

type Metadata struct {
	Title       string
	Description string
	SiteName    string
	ImageURL    string
}

func FetchMetadata(ctx context.Context, sourceURL string) (Metadata, error) {
	resp, err := fetchURL(ctx, sourceURL)
	if err != nil {
		return Metadata{}, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(io.LimitReader(resp.Body, 8<<20))
	if err != nil {
		return Metadata{}, err
	}

	meta := Metadata{}
	contentType := resp.Header.Get("Content-Type")
	meta.ImageURL = extractImageURL(resp.Request.URL, contentType)

	if strings.HasPrefix(strings.ToLower(contentType), "image/") {
		if meta.Title == "" {
			meta.Title = path.Base(resp.Request.URL.Path)
		}
		if meta.SiteName == "" {
			meta.SiteName = resp.Request.URL.Hostname()
		}
		return meta, nil
	}

	doc, err := html.Parse(bytes.NewReader(body))
	if err != nil {
		return meta, nil
	}

	extractMeta(doc, &meta)

	if meta.Title == "" {
		meta.Title = titleFromDoc(doc)
	}

	if meta.ImageURL == "" {
		if oembed, err := fetchOEmbed(ctx, sourceURL); err == nil {
			if meta.Title == "" {
				meta.Title = oembed.Title
			}
			if meta.Description == "" {
				meta.Description = oembed.Description
			}
			if meta.ImageURL == "" {
				meta.ImageURL = oembed.ThumbnailURL
			}
			if meta.SiteName == "" {
				meta.SiteName = oembed.ProviderName
			}
		}
	}

	return meta, nil
}

type oEmbedResponse struct {
	Title        string `json:"title"`
	Description  string `json:"description"`
	ThumbnailURL string `json:"thumbnail_url"`
	ProviderName string `json:"provider_name"`
}

func fetchOEmbed(ctx context.Context, sourceURL string) (oEmbedResponse, error) {
	oembedURL := fmt.Sprintf("https://noembed.com/embed?url=%s", url.QueryEscape(sourceURL))
	resp, err := fetchURL(ctx, oembedURL)
	if err != nil {
		return oEmbedResponse{}, err
	}
	defer resp.Body.Close()

	var payload oEmbedResponse
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return oEmbedResponse{}, err
	}
	return payload, nil
}

func fetchURL(ctx context.Context, rawURL string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "lookbook/1.0")
	client := &http.Client{Timeout: 12 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		resp.Body.Close()
		return nil, fmt.Errorf("fetch %s: status %d", rawURL, resp.StatusCode)
	}
	return resp, nil
}

func extractMeta(n *html.Node, meta *Metadata) {
	if n.Type == html.ElementNode && n.Data == "meta" {
		var property, content, name string
		for _, attr := range n.Attr {
			switch strings.ToLower(attr.Key) {
			case "property":
				property = strings.ToLower(attr.Val)
			case "content":
				content = strings.TrimSpace(attr.Val)
			case "name":
				name = strings.ToLower(attr.Val)
			}
		}
		if content != "" {
			switch property {
			case "og:title", "twitter:title":
				if meta.Title == "" {
					meta.Title = content
				}
			case "og:description", "twitter:description":
				if meta.Description == "" {
					meta.Description = content
				}
			case "og:site_name":
				if meta.SiteName == "" {
					meta.SiteName = content
				}
			case "og:image", "twitter:image":
				if meta.ImageURL == "" {
					meta.ImageURL = content
				}
			}
			if meta.Description == "" && name == "description" {
				meta.Description = content
			}
		}
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		extractMeta(c, meta)
	}
}

func titleFromDoc(n *html.Node) string {
	if n.Type == html.ElementNode && n.Data == "title" && n.FirstChild != nil {
		return strings.TrimSpace(n.FirstChild.Data)
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		if title := titleFromDoc(c); title != "" {
			return title
		}
	}
	return ""
}

func extractImageURL(baseURL *url.URL, contentType string) string {
	if strings.HasPrefix(strings.ToLower(contentType), "image/") {
		return baseURL.String()
	}
	return ""
}

func storeImages(ctx context.Context, db *sql.DB, itemID int64, meta Metadata) error {
	if meta.ImageURL == "" {
		return nil
	}

	resp, err := fetchURL(ctx, meta.ImageURL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	payload, err := io.ReadAll(io.LimitReader(resp.Body, 16<<20))
	if err != nil {
		return err
	}

	contentType := resp.Header.Get("Content-Type")
	if contentType == "" {
		contentType = mime.TypeByExtension(strings.ToLower(path.Ext(resp.Request.URL.Path)))
	}

	width, height, thumbBytes, thumbHeight, err := createThumb(payload)
	if err != nil {
		return err
	}

	_, err = image.QCreate(ctx, db, itemID, meta.ImageURL, contentType, payload, width, height, false)
	if err != nil {
		return err
	}
	if thumbBytes != nil {
		_, err = image.QCreate(ctx, db, itemID, meta.ImageURL, thumbContentType(contentType), thumbBytes, thumbWidth, thumbHeight, true)
		if err != nil {
			return err
		}
	}
	return nil
}

func createThumb(payload []byte) (int, int, []byte, int, error) {
	img, err := imaging.Decode(bytes.NewReader(payload))
	if err != nil {
		return 0, 0, nil, 0, nil
	}
	bounds := img.Bounds()
	width := bounds.Dx()
	height := bounds.Dy()

	if width <= thumbWidth {
		return width, height, payload, height, nil
	}

	thumb := imaging.Resize(img, thumbWidth, 0, imaging.Lanczos)
	buf := new(bytes.Buffer)
	if err := imaging.Encode(buf, thumb, imaging.JPEG); err != nil {
		return width, height, nil, 0, err
	}
	return width, height, buf.Bytes(), thumb.Bounds().Dy(), nil
}

func thumbContentType(_ string) string {
	return "image/jpeg"
}
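A minimal sketch of the extraction pipeline on its own: fetch a page, pull OG/Twitter metadata (with the oEmbed fallback), and print the result. The URL is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"git.soup.land/soup/lookbook/internal/services"
)

func main() {
	// fetchURL already enforces a 12s client timeout; this outer context
	// bounds the whole fetch-plus-oEmbed sequence.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	meta, err := services.FetchMetadata(ctx, "https://example.com/some-page")
	if err != nil {
		panic(err)
	}
	fmt.Printf("title=%q site=%q image=%q\n", meta.Title, meta.SiteName, meta.ImageURL)
}
```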
4
internal/static/css/pico.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
331
internal/static/css/styles.css
Normal file
@@ -0,0 +1,331 @@
/* Commit Mono */
@font-face {
    font-family: "CommitMono";
    font-style: normal;
    font-weight: 400;
    font-display: swap;
    src: url("../fonts/CommitMono-400-Regular.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono";
    font-style: italic;
    font-weight: 400;
    font-display: swap;
    src: url("../fonts/CommitMono-400-Italic.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono-Light";
    font-style: normal;
    font-weight: 400;
    font-display: swap;
    src: url("../fonts/CommitMono-450-Regular.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono-Light";
    font-style: italic;
    font-weight: 400;
    font-display: swap;
    src: url("../fonts/CommitMono-450-Italic.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono";
    font-style: normal;
    font-weight: 700;
    font-display: swap;
    src: url("../fonts/CommitMono-700-Regular.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono";
    font-style: italic;
    font-weight: 700;
    font-display: swap;
    src: url("../fonts/CommitMono-700-Italic.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono-Light";
    font-style: normal;
    font-weight: 700;
    font-display: swap;
    src: url("../fonts/CommitMono-700-Regular.woff2") format("woff2");
}

@font-face {
    font-family: "CommitMono-Light";
    font-style: italic;
    font-weight: 700;
    font-display: swap;
    src: url("../fonts/CommitMono-700-Italic.woff2") format("woff2");
}

:root {
    color-scheme: light dark;
    font-family:
        "CommitMono-Light",
        "CommitMono",
        monospace;
    font-size: 13px;
}

@media (prefers-color-scheme: dark) {
    :root {
        font-family:
            "CommitMono",
            monospace;
    }
}

body {
    margin: 0;
    background: var(--pico-background-color);
    color: var(--pico-color);
}

.container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 2rem 1.5rem 4rem;
}

.site-header {
    display: flex;
    align-items: center;
    justify-content: space-between;
    padding: 1.5rem 2rem;
    border-bottom: 2px solid currentColor;
}

.site-header .title {
    font-weight: 700;
    text-transform: uppercase;
    letter-spacing: 0.08em;
}

.site-nav {
    display: flex;
    gap: 0.75rem;
    align-items: center;
}

.site-nav a,
.site-nav button {
    font-size: 0.85rem;
    text-transform: uppercase;
    letter-spacing: 0.08em;
    border: 1px solid currentColor;
    padding: 0.35rem 0.75rem;
    background: transparent;
    color: inherit;
}

.site-nav button.ghost {
    background: transparent;
}

button.ghost {
    border: 1px solid currentColor;
    padding: 0.35rem 0.75rem;
    background: transparent;
    color: inherit;
}

.hero {
    display: grid;
    grid-template-columns: minmax(0, 1fr);
    gap: 1rem;
    margin-bottom: 2rem;
}

.hero form {
    display: grid;
    grid-template-columns: minmax(0, 1fr) auto;
    gap: 0.5rem;
}

.hero input[type="url"] {
    border-radius: 0;
    border: 2px solid currentColor;
    background: transparent;
    padding: 0.6rem 0.75rem;
}

.hero button {
    border-radius: 0;
    border: 2px solid currentColor;
    background: currentColor;
    color: var(--pico-background-color);
    padding: 0.6rem 1rem;
}

.filters {
    display: flex;
    flex-wrap: wrap;
    gap: 0.5rem;
    margin-bottom: 2rem;
}

.filter-pill {
    border: 1px solid currentColor;
    padding: 0.35rem 0.6rem;
    font-size: 0.8rem;
    cursor: pointer;
    text-transform: uppercase;
    letter-spacing: 0.06em;
}

.filter-pill.active {
    background: currentColor;
    color: var(--pico-background-color);
}

.gallery {
    display: grid;
    gap: 1.5rem;
    grid-template-columns: repeat(auto-fill, minmax(220px, 1fr));
}

.card {
    position: relative;
    border: 2px solid currentColor;
    background: transparent;
    overflow: hidden;
    min-height: 160px;
}

.card img,
.card video {
    display: block;
    width: 100%;
    height: auto;
}

.card a {
    color: inherit;
    text-decoration: none;
}

.card .placeholder {
    padding: 1rem;
    font-size: 0.9rem;
}

.card .overlay {
    position: absolute;
    inset: 0;
    background: rgba(0, 0, 0, 0.75);
    color: #fff;
    display: flex;
    flex-direction: column;
    justify-content: flex-end;
    gap: 0.5rem;
    padding: 0.8rem;
    opacity: 0;
    transition: opacity 0.15s ease;
}

.card:hover .overlay {
    opacity: 1;
}

.card .overlay .title {
    font-weight: 600;
    font-size: 0.85rem;
}

.card .overlay .tags {
    font-size: 0.7rem;
    text-transform: uppercase;
    letter-spacing: 0.08em;
}

.detail {
    display: grid;
    grid-template-columns: minmax(0, 1.1fr) minmax(0, 0.9fr);
    gap: 2rem;
}

.detail img {
    width: 100%;
    border: 2px solid currentColor;
}

.detail .meta {
    display: grid;
    gap: 0.75rem;
}

.detail .meta h1 {
    margin: 0;
    font-size: 1.5rem;
}

.tag-list {
    display: flex;
    flex-wrap: wrap;
    gap: 0.5rem;
}

.tag-chip {
    border: 1px solid currentColor;
    padding: 0.25rem 0.5rem;
    font-size: 0.75rem;
    text-transform: uppercase;
    letter-spacing: 0.06em;
}

.tag-editor {
    display: flex;
    flex-direction: column;
    gap: 0.5rem;
}

.tag-editor input {
    border-radius: 0;
    border: 2px solid currentColor;
    padding: 0.45rem 0.6rem;
    background: transparent;
}

.tag-suggestions {
    display: flex;
    flex-wrap: wrap;
    gap: 0.5rem;
}

.tag-suggestions button {
    border-radius: 0;
    border: 1px solid currentColor;
    padding: 0.25rem 0.5rem;
    background: transparent;
}

.actions {
    display: flex;
    flex-wrap: wrap;
    gap: 0.5rem;
}

.actions button,
.actions form button {
    border-radius: 0;
    border: 1px solid currentColor;
    padding: 0.45rem 0.8rem;
    background: transparent;
}

.notice {
    border: 2px dashed currentColor;
    padding: 1rem;
    font-size: 0.85rem;
}

@media (max-width: 900px) {
    .detail {
        grid-template-columns: 1fr;
    }
}
BIN
internal/static/fonts/CommitMono-400-Italic.woff2
Normal file
Binary file not shown.
BIN
internal/static/fonts/CommitMono-400-Regular.woff2
Normal file
Binary file not shown.
BIN
internal/static/fonts/CommitMono-450-Italic.woff2
Normal file
Binary file not shown.
BIN
internal/static/fonts/CommitMono-450-Regular.woff2
Normal file
Binary file not shown.
BIN
internal/static/fonts/CommitMono-700-Italic.woff2
Normal file
Binary file not shown.
BIN
internal/static/fonts/CommitMono-700-Regular.woff2
Normal file
Binary file not shown.
39
internal/static/fonts/OFL.txt
Normal file
@@ -0,0 +1,39 @@
Copyright (c) 2023 Eigil Nikolajsen (eigi0088@gmail.com)

This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
http://scripts.sil.org/OFL

-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------

PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide
development of collaborative font projects, to support the font creation
efforts of academic and linguistic communities, and to provide a free and
open framework in which fonts may be shared and improved in partnership
with others.

The OFL allows the licensed fonts to be used, studied, modified and
redistributed freely as long as they are not sold by themselves. The
fonts, including any derivative works, can be bundled, embedded,
redistributed and/or sold with any software provided that any reserved
names are not used by derivative works. The fonts and derivatives,
however, cannot be released under any other type of license. The
requirement for fonts to remain under this license does not apply
to any document created using the fonts or their derivatives.

DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this license and clearly marked as such. This may
include source files, build scripts and documentation.

"Reserved Font Name" refers to any names specified as such after the
copyright statement(s).

"Original Version" refers to the collection of Font Software components as
distributed by the Copyright Holder(s).

"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole
181
internal/static/js/app.js
Normal file
@@ -0,0 +1,181 @@
(function() {
  const logoutButton = document.querySelector('.site-nav button[onclick="logout()"]');
  const authStatusEl = document.querySelector('[data-auth-status]');

  window.auth = async function() {
    const password = window.prompt('Enter password');
    if (!password) return;
    const resp = await fetch('/auth/login', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ password })
    });
    if (resp.ok) {
      await refreshAuth();
    } else {
      alert('Auth failed');
    }
  };

  window.logout = async function() {
    await fetch('/auth/logout', { method: 'POST' });
    await refreshAuth();
  };

  async function refreshAuth() {
    const resp = await fetch('/auth/status');
    if (!resp.ok) return;
    const data = await resp.json();
    window.LOOKBOOK_AUTH = data.authenticated;
    if (logoutButton) {
      logoutButton.hidden = !data.authenticated;
    }
    if (authStatusEl) {
      authStatusEl.textContent = data.authenticated ? 'Authed' : 'Read only';
    }
    const gated = document.querySelectorAll('[data-auth-required]');
    gated.forEach((el) => {
      el.toggleAttribute('hidden', !data.authenticated);
    });
  }

  const tagInput = document.querySelector('[data-tag-input]');
  const tagSuggestions = document.querySelector('[data-tag-suggestions]');
  const tagForm = document.querySelector('[data-tag-form]');
  const tagEditor = document.querySelector('[data-tag-editor]');
  const tagToggle = document.querySelector('[data-tag-toggle]');
  const tagList = document.querySelector('[data-tag-list]');
  const tagData = tagSuggestions ? JSON.parse(tagSuggestions.getAttribute('data-tags') || '[]') : [];

  if (tagToggle && tagEditor) {
    tagToggle.addEventListener('click', () => {
      const isHidden = tagEditor.hasAttribute('hidden');
      tagEditor.toggleAttribute('hidden', !isHidden);
    });
  }

  if (tagList && tagEditor) {
    tagList.addEventListener('click', () => {
      if (!window.LOOKBOOK_AUTH) return;
      const isHidden = tagEditor.hasAttribute('hidden');
      tagEditor.toggleAttribute('hidden', !isHidden);
    });
  }

  if (tagSuggestions) {
    if (tagInput) {
      tagInput.addEventListener('input', () => renderSuggestions(tagInput.value));
      renderSuggestions(tagInput.value);
    } else {
      renderSuggestions('');
    }
  }

  function renderSuggestions(value) {
    if (!tagSuggestions) return;
    const query = (value || '').trim().toLowerCase();
    tagSuggestions.innerHTML = '';
    const matches = fuzzyMatchTags(query, tagData).slice(0, 10);
    matches.forEach((tag) => {
      const button = document.createElement('button');
      button.type = 'button';
      button.textContent = tag;
      button.addEventListener('click', () => {
        if (tagInput) {
          addTag(tag);
          return;
        }
        const target = Array.from(filterButtons).find((btn) => btn.dataset.tagFilter === tag);
        if (target) target.click();
      });
      tagSuggestions.appendChild(button);
    });
  }

  function addTag(tag) {
    if (!tagInput) return;
    const tags = parseTags(tagInput.value);
    if (!tags.includes(tag)) tags.push(tag);
    tagInput.value = tags.join(', ');
    renderSuggestions(tagInput.value);
  }

  function parseTags(value) {
    return value
      .split(',')
      .map((tag) => tag.trim())
      .filter((tag) => tag.length > 0);
  }

  function fuzzyMatchTags(query, tags) {
    if (!query) return tags;
    const scores = tags.map((tag) => ({ tag, score: fuzzyScore(tag, query) }))
      .filter((entry) => entry.score > 0)
      .sort((a, b) => b.score - a.score || a.tag.localeCompare(b.tag));
    return scores.map((entry) => entry.tag);
  }

  function fuzzyScore(tag, query) {
    let score = 0;
    let ti = 0;
    for (const qc of query) {
      const idx = tag.indexOf(qc, ti);
      if (idx === -1) return 0;
      score += idx === ti ? 3 : 1;
      ti = idx + 1;
    }
    return score;
  }

  const filterButtons = document.querySelectorAll('[data-tag-filter]');
  const gridItems = document.querySelectorAll('[data-item-tags]');
  const selectedTags = new Set();

  filterButtons.forEach((button) => {
    button.addEventListener('click', (event) => {
      const tag = button.dataset.tagFilter;
      const multi = event.ctrlKey || event.metaKey;
      if (!multi) {
        selectedTags.clear();
        filterButtons.forEach((b) => b.classList.remove('active'));
      }
      if (selectedTags.has(tag)) {
        selectedTags.delete(tag);
        button.classList.remove('active');
      } else {
        selectedTags.add(tag);
        button.classList.add('active');
      }
      applyFilters();
    });
  });

  function applyFilters() {
    const selected = Array.from(selectedTags);
    gridItems.forEach((item) => {
      const tags = (item.getAttribute('data-item-tags') || '').split(',').map((t) => t.trim()).filter(Boolean);
      if (selected.length === 0) {
        item.removeAttribute('hidden');
        return;
      }
      const matches = selected.some((tag) => tags.includes(tag));
      item.toggleAttribute('hidden', !matches);
    });
  }

  if (tagForm) {
    tagForm.addEventListener('submit', async (event) => {
      event.preventDefault();
      const formData = new FormData(tagForm);
      const resp = await fetch(tagForm.action, {
        method: 'POST',
        body: formData
      });
      if (resp.ok) {
        window.location.reload();
      }
    });
  }

  refreshAuth();
})();
42
internal/static/static.go
Normal file
@@ -0,0 +1,42 @@
package static

import (
	"crypto/sha256"
	"embed"
	"encoding/hex"
	"net/http"
	"strings"
	"time"
)

//go:embed css/* js/* fonts/*
var staticFS embed.FS

// Version is set via -ldflags in production
var Version string

func init() {
	if Version == "" {
		h := sha256.Sum256([]byte(time.Now().String()))
		Version = hex.EncodeToString(h[:4])
	}
}

func VersionedPath(path string) string {
	return "/static/" + Version + "/" + path
}

func Handler() http.Handler {
	fileServer := http.FileServer(http.FS(staticFS))
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		path = strings.TrimPrefix(path, "/static/")
		if idx := strings.Index(path, "/"); idx != -1 {
			path = path[idx+1:]
		}
		r.URL.Path = "/" + path

		w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
		fileServer.ServeHTTP(w, r)
	})
}
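A minimal sketch of how the versioned paths and the handler line up. Templates emit `/static/<version>/...` via `VersionedPath`, the handler strips the version segment back off and serves the embedded file with a one-year immutable cache header, so bumping `Version` is the cache-busting mechanism. The mux pattern below is an assumption:

```go
package main

import (
	"fmt"
	"net/http"

	"git.soup.land/soup/lookbook/internal/static"
)

func main() {
	// e.g. /static/ab12cd34/css/styles.css
	fmt.Println(static.VersionedPath("css/styles.css"))

	// Any /static/<anything>/<file> path resolves to the same embedded
	// file, so stale clients keep working across deploys.
	http.Handle("/static/", static.Handler())
	http.ListenAndServe(":8080", nil)
}
```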
15
shell.nix
Normal file
@@ -0,0 +1,15 @@
let
  pins = import ./npins;
  sx-nix = import ../sx-nix {};
  pkgs = sx-nix.pkgs;
in

pkgs.mkShell {
  packages = [
    (sx-nix.go.goToolchainBin { version = "1.25.5"; sha256 = "sha256-npt1XWOzas8wwSqaP8N5JDcUwcbT3XKGHaY38zbrs1s="; })
    pkgs.postgresql
    pkgs.entr
    pkgs.fd
    pkgs.hivemind
  ];
}
71
vendor/git.soup.land/soup/sxgo/ssr/ssr.go
vendored
Normal file
@@ -0,0 +1,71 @@
package ssr

import (
	"errors"
	"html/template"
	"io"
	"sync"

	"github.com/cespare/xxhash/v2"
)

type Writer struct {
	err   error
	w     io.Writer
	cache *TmplCache
}

func NewWriter(w io.Writer, cache *TmplCache) *Writer {
	return &Writer{w: w, cache: cache}
}

func (w *Writer) Raw(s string) error {
	_, err := w.w.Write([]byte(s))
	w.err = errors.Join(w.err, err)
	return w.err
}

func (w *Writer) Tmpl(data any, tmpl string) error {
	t, err := w.cache.GetOrCompile(tmpl)
	if err != nil {
		w.err = errors.Join(w.err, err)
		return w.err
	}

	err = t.Execute(w.w, data)
	w.err = errors.Join(w.err, err)
	return w.err
}

func (w *Writer) Error() error {
	return w.err
}

type TmplCache struct {
	funcs template.FuncMap
	cache sync.Map // uint64 -> *template.Template
}

func NewTmplCache(funcs template.FuncMap) *TmplCache {
	return &TmplCache{funcs: funcs}
}

func (t *TmplCache) GetOrCompile(content string) (*template.Template, error) {
	key := xxhash.Sum64String(content)

	if cached, ok := t.cache.Load(key); ok {
		return cached.(*template.Template), nil
	}

	tmpl, err := template.New("").Funcs(t.funcs).Parse(content)
	if err != nil {
		return nil, err
	}

	t.cache.Store(key, tmpl)
	return tmpl, nil
}

type Renderable interface {
	Render(sw *Writer) error
}
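A minimal sketch of the ssr package in use: one cache shared across renders, with error accumulation on the Writer checked once at the end. The template text is illustrative:

```go
package main

import (
	"html/template"
	"os"

	"git.soup.land/soup/sxgo/ssr"
)

func main() {
	cache := ssr.NewTmplCache(template.FuncMap{})

	w := ssr.NewWriter(os.Stdout, cache)
	w.Raw("<ul>")
	// GetOrCompile keys compiled templates by an xxhash of the source
	// string, so repeated renders of the same literal skip re-parsing.
	w.Tmpl([]string{"a", "b"}, `{{range .}}<li>{{.}}</li>{{end}}`)
	w.Raw("</ul>")

	if err := w.Error(); err != nil { // joined errors, checked once
		panic(err)
	}
}
```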
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
74
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,74 @@
# xxhash

[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego    | asm       |
| ---------- | --------- | --------- |
| 4 B        | 1.3 GB/s  | 1.2 GB/s  |
| 16 B       | 2.9 GB/s  | 3.5 GB/s  |
| 100 B      | 6.9 GB/s  | 8.1 GB/s  |
| 4 KB       | 11.7 GB/s | 16.7 GB/s |
| 10 MB      | 12.0 GB/s | 17.3 GB/s |

These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:

```
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
- [Ristretto](https://github.com/dgraph-io/ristretto)
- [Badger](https://github.com/dgraph-io/badger)
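A short usage example of the two entry points the README describes, one-shot and streaming (these are the library's documented public functions):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a string without an allocation.
	fmt.Println(xxhash.Sum64String("hello"))

	// Streaming via the hash.Hash64-compatible Digest.
	d := xxhash.New()
	d.WriteString("he")
	d.WriteString("llo")
	fmt.Println(d.Sum64() == xxhash.Sum64String("hello")) // true
}
```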
10
vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
Normal file
@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
243
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
@@ -0,0 +1,243 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
	"encoding/binary"
	"errors"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}

// Digest implements hash.Hash64.
//
// Note that a zero-valued Digest is not ready to receive writes.
// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64
	mem   [32]byte
	n     int // how much of mem is used
}

// New creates a new Digest with a zero seed.
func New() *Digest {
	return NewWithSeed(0)
}

// NewWithSeed creates a new Digest with the given seed.
func NewWithSeed(seed uint64) *Digest {
	var d Digest
	d.ResetWithSeed(seed)
	return &d
}

// Reset clears the Digest's state so that it can be reused.
// It uses a seed value of zero.
func (d *Digest) Reset() {
	d.ResetWithSeed(0)
}

// ResetWithSeed clears the Digest's state so that it can be reused.
// It uses the given seed to initialize the state.
func (d *Digest) ResetWithSeed(seed uint64) {
	d.v1 = seed + prime1 + prime2
	d.v2 = seed + prime2
	d.v3 = seed
	d.v4 = seed - prime1
	d.total = 0
	d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	memleft := d.mem[d.n&(len(d.mem)-1):]

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(memleft, b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		c := copy(memleft, b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[c:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)

	return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
	s := d.Sum64()
	return append(
		b,
		byte(s>>56),
		byte(s>>48),
		byte(s>>40),
		byte(s>>32),
		byte(s>>24),
		byte(s>>16),
		byte(s>>8),
		byte(s),
	)
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = d.v3 + prime5
	}

	h += d.total

	b := d.mem[:d.n&(len(d.mem)-1)]
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

const (
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
	x := u64(b)
	return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}

func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
209
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,209 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Registers:
#define h      AX
#define d      AX
#define p      SI // pointer to advance through b
#define n      DX
#define end    BX // loop end
#define v1     R8
#define v2     R9
#define v3     R10
#define v4     R11
#define x      R12
#define prime1 R13
#define prime2 R14
#define prime4 DI

#define round(acc, x) \
	IMULQ prime2, x   \
	ADDQ  x, acc      \
	ROLQ  $31, acc    \
	IMULQ prime1, acc

// round0 performs the operation x = round(0, x).
#define round0(x) \
	IMULQ prime2, x \
	ROLQ  $31, x    \
	IMULQ prime1, x

// mergeRound applies a merge round on the two registers acc and x.
// It assumes that prime1, prime2, and prime4 have been loaded.
#define mergeRound(acc, x) \
	round0(x)         \
	XORQ  x, acc      \
	IMULQ prime1, acc \
	ADDQ  prime4, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
	MOVQ +0(p), x  \
	round(v1, x)   \
	MOVQ +8(p), x  \
	round(v2, x)   \
	MOVQ +16(p), x \
	round(v3, x)   \
	MOVQ +24(p), x \
	round(v4, x)   \
	ADDQ $32, p    \
	CMPQ p, end    \
	JLE  loop

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	// Load fixed primes.
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2
	MOVQ ·primes+24(SB), prime4

	// Load slice.
	MOVQ b_base+0(FP), p
	MOVQ b_len+8(FP), n
	LEAQ (p)(n*1), end

	// The first loop limit will be len(b)-32.
	SUBQ $32, end

	// Check whether we have at least one block.
	CMPQ n, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ prime1, v1
	ADDQ prime2, v1
	MOVQ prime2, v2
	XORQ v3, v3
	XORQ v4, v4
	SUBQ prime1, v4

	blockLoop()

	MOVQ v1, h
	ROLQ $1, h
	MOVQ v2, x
	ROLQ $7, x
	ADDQ x, h
	MOVQ v3, x
	ROLQ $12, x
	ADDQ x, h
	MOVQ v4, x
	ROLQ $18, x
	ADDQ x, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

	JMP afterBlocks

noBlocks:
	MOVQ ·primes+32(SB), h

afterBlocks:
	ADDQ n, h

	ADDQ $24, end
	CMPQ p, end
	JG   try4

loop8:
	MOVQ  (p), x
	ADDQ  $8, p
	round0(x)
	XORQ  x, h
	ROLQ  $27, h
	IMULQ prime1, h
	ADDQ  prime4, h

	CMPQ p, end
	JLE  loop8

try4:
	ADDQ $4, end
	CMPQ p, end
	JG   try1

	MOVL  (p), x
	ADDQ  $4, p
	IMULQ prime1, x
	XORQ  x, h

	ROLQ  $23, h
	IMULQ prime2, h
	ADDQ  ·primes+16(SB), h

try1:
	ADDQ $4, end
	CMPQ p, end
	JGE  finalize

loop1:
	MOVBQZX (p), x
	ADDQ    $1, p
	IMULQ   ·primes+32(SB), x
	XORQ    x, h
	ROLQ    $11, h
	IMULQ   prime1, h

	CMPQ p, end
	JL   loop1

finalize:
	MOVQ  h, x
	SHRQ  $33, x
	XORQ  x, h
	IMULQ prime2, h
	MOVQ  h, x
	SHRQ  $29, x
	XORQ  x, h
	IMULQ ·primes+16(SB), h
	MOVQ  h, x
	SHRQ  $32, x
	XORQ  x, h

	MOVQ h, ret+24(FP)
	RET

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	// Load fixed primes needed for round.
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2

	// Load slice.
	MOVQ b_base+8(FP), p
	MOVQ b_len+16(FP), n
	LEAQ (p)(n*1), end
	SUBQ $32, end

	// Load vN from d.
	MOVQ s+0(FP), d
	MOVQ 0(d), v1
	MOVQ 8(d), v2
	MOVQ 16(d), v3
	MOVQ 24(d), v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
	blockLoop()

	// Copy vN back to d.
	MOVQ v1, 0(d)
	MOVQ v2, 8(d)
	MOVQ v3, 16(d)
	MOVQ v4, 24(d)

	// The number of bytes written is p minus the old base pointer.
	SUBQ b_base+8(FP), p
	MOVQ p, ret+32(FP)

	RET
183
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,183 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Registers:
#define digest  R1
#define h       R2 // return value
#define p       R3 // input pointer
#define n       R4 // input length
#define nblocks R5 // n / 32
#define prime1  R7
#define prime2  R8
#define prime3  R9
#define prime4  R10
#define prime5  R11
#define v1      R12
#define v2      R13
#define v3      R14
#define v4      R15
#define x1      R20
#define x2      R21
#define x3      R22
#define x4      R23

#define round(acc, x) \
	MADD prime2, acc, x, acc \
	ROR  $64-31, acc         \
	MUL  prime1, acc

// round0 performs the operation x = round(0, x).
#define round0(x) \
	MUL prime2, x \
	ROR $64-31, x \
	MUL prime1, x

#define mergeRound(acc, x) \
	round0(x)   \
	EOR  x, acc \
	MADD acc, prime4, prime1, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that n >= 32.
#define blockLoop() \
	LSR     $5, n, nblocks \
	PCALIGN $16            \
loop:                          \
	LDP.P 16(p), (x1, x2)  \
	LDP.P 16(p), (x3, x4)  \
	round(v1, x1)          \
	round(v2, x2)          \
	round(v3, x3)          \
	round(v4, x4)          \
	SUB   $1, nblocks      \
	CBNZ  nblocks, loop

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	LDP b_base+0(FP), (p, n)

	LDP  ·primes+0(SB), (prime1, prime2)
	LDP  ·primes+16(SB), (prime3, prime4)
	MOVD ·primes+32(SB), prime5

	CMP  $32, n
	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
	BLT  afterLoop

	ADD  prime1, prime2, v1
	MOVD prime2, v2
	MOVD $0, v3
	NEG  prime1, v4

	blockLoop()

	ROR $64-1, v1, x1
	ROR $64-7, v2, x2
	ADD x1, x2
	ROR $64-12, v3, x3
	ROR $64-18, v4, x4
	ADD x3, x4
	ADD x2, x4, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

afterLoop:
	ADD n, h

	TBZ   $4, n, try8
	LDP.P 16(p), (x1, x2)

	round0(x1)

	// NOTE: here and below, sequencing the EOR after the ROR (using a
	// rotated register) is worth a small but measurable speedup for small
	// inputs.
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

	round0(x2)
	ROR  $64-27, h
	EOR  x2 @> 64-27, h, h
	MADD h, prime4, prime1, h

try8:
	TBZ    $3, n, try4
	MOVD.P 8(p), x1

	round0(x1)
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

try4:
	TBZ     $2, n, try2
	MOVWU.P 4(p), x2

	MUL  prime1, x2
	ROR  $64-23, h
	EOR  x2 @> 64-23, h, h
	MADD h, prime3, prime2, h

try2:
	TBZ     $1, n, try1
	MOVHU.P 2(p), x3
	AND     $255, x3, x1
	LSR     $8, x3, x2

	MUL prime5, x1
	ROR $64-11, h
	EOR x1 @> 64-11, h, h
	MUL prime1, h

	MUL prime5, x2
	ROR $64-11, h
	EOR x2 @> 64-11, h, h
	MUL prime1, h

try1:
	TBZ   $0, n, finalize
	MOVBU (p), x4

	MUL prime5, x4
	ROR $64-11, h
	EOR x4 @> 64-11, h, h
	MUL prime1, h

finalize:
	EOR h >> 33, h
	MUL prime2, h
	EOR h >> 29, h
	MUL prime3, h
	EOR h >> 32, h

	MOVD h, ret+24(FP)
	RET

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	LDP ·primes+0(SB), (prime1, prime2)

	// Load state. Assume v[1-4] are stored contiguously.
	MOVD d+0(FP), digest
	LDP  0(digest), (v1, v2)
	LDP  16(digest), (v3, v4)

	LDP b_base+8(FP), (p, n)

	blockLoop()

	// Store updated state.
	STP (v1, v2), 0(digest)
	STP (v3, v4), 16(digest)

	BIC  $31, n
	MOVD n, ret+32(FP)
	RET
15
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
    // A simpler version would be
    //   d := New()
    //   d.Write(b)
    //   return d.Sum64()
    // but this is faster, particularly for small inputs.

    n := len(b)
    var h uint64

    if n >= 32 {
        v1 := primes[0] + prime2
        v2 := prime2
        v3 := uint64(0)
        v4 := -primes[0]
        for len(b) >= 32 {
            v1 = round(v1, u64(b[0:8:len(b)]))
            v2 = round(v2, u64(b[8:16:len(b)]))
            v3 = round(v3, u64(b[16:24:len(b)]))
            v4 = round(v4, u64(b[24:32:len(b)]))
            b = b[32:len(b):len(b)]
        }
        h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
        h = mergeRound(h, v1)
        h = mergeRound(h, v2)
        h = mergeRound(h, v3)
        h = mergeRound(h, v4)
    } else {
        h = prime5
    }

    h += uint64(n)

    for ; len(b) >= 8; b = b[8:] {
        k1 := round(0, u64(b[:8]))
        h ^= k1
        h = rol27(h)*prime1 + prime4
    }
    if len(b) >= 4 {
        h ^= uint64(u32(b[:4])) * prime1
        h = rol23(h)*prime2 + prime3
        b = b[4:]
    }
    for ; len(b) > 0; b = b[1:] {
        h ^= uint64(b[0]) * prime5
        h = rol11(h) * prime1
    }

    h ^= h >> 33
    h *= prime2
    h ^= h >> 29
    h *= prime3
    h ^= h >> 32

    return h
}

func writeBlocks(d *Digest, b []byte) int {
    v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
    n := len(b)
    for len(b) >= 32 {
        v1 = round(v1, u64(b[0:8:len(b)]))
        v2 = round(v2, u64(b[8:16:len(b)]))
        v3 = round(v3, u64(b[16:24:len(b)]))
        v4 = round(v4, u64(b[24:32:len(b)]))
        b = b[32:len(b):len(b)]
    }
    d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
    return n - len(b)
}
16
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
    return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
    return d.Write([]byte(s))
}
58
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
//go:build !appengine
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
    "unsafe"
)

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
//   var b []byte
//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
//   bh.Len = len(s)
//   bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
    b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
    return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
    d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
    // d.Write always returns len(s), nil.
    // Ignoring the return output and returning these fixed values buys a
    // savings of 6 in the inliner's cost model.
    return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
    s   string
    cap int
}
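Taken together, these files give the vendored xxhash package a single exported API behind several build-tag variants (assembly on amd64/arm64, portable Go elsewhere, unsafe-free on App Engine). A minimal sketch of calling it from application code; the inputs are illustrative, the import path is the real module path:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Println(xxhash.Sum64([]byte("https://example.com/image.jpg")))

	// Streaming: a Digest accumulates writes, using writeBlocks internally.
	d := xxhash.New()
	d.WriteString("https://example.com/")
	d.WriteString("image.jpg")
	fmt.Println(d.Sum64())
}
```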
12
vendor/github.com/disintegration/imaging/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,12 @@
language: go
go:
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"

before_install:
  - go get github.com/mattn/goveralls

script:
  - go test -v -race -cover
  - $GOPATH/bin/goveralls -service=travis-ci
21
vendor/github.com/disintegration/imaging/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2012 Grigory Dryapak

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
226
vendor/github.com/disintegration/imaging/README.md
generated
vendored
Normal file
@ -0,0 +1,226 @@
# Imaging

[GoDoc](https://godoc.org/github.com/disintegration/imaging)
[Build Status](https://travis-ci.org/disintegration/imaging)
[Coverage Status](https://coveralls.io/github/disintegration/imaging?branch=master)
[Go Report Card](https://goreportcard.com/report/github.com/disintegration/imaging)

Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).

All the image processing functions provided by the package accept any image type that implements `image.Image` interface
as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, non-premultiplied alpha).

## Installation

    go get -u github.com/disintegration/imaging

## Documentation

http://godoc.org/github.com/disintegration/imaging

## Usage examples

A few usage examples can be found below. See the documentation for the full list of supported functions.

### Image resizing

```go
// Resize srcImage to size = 128x128px using the Lanczos filter.
dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos)

// Resize srcImage to width = 800px preserving the aspect ratio.
dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)

// Scale down srcImage to fit the 800x600px bounding box.
dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)

// Resize and crop the srcImage to fill the 100x100px area.
dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos)
```

Imaging supports image resizing using various resampling filters. The most notable ones:
- `Lanczos` - A high-quality resampling filter for photographic images yielding sharp results.
- `CatmullRom` - A sharp cubic filter that is faster than the Lanczos filter while providing similar results.
- `MitchellNetravali` - A cubic filter that produces smoother results with fewer ringing artifacts than CatmullRom.
- `Linear` - Bilinear resampling filter, produces smooth output. Faster than cubic filters.
- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor.
- `NearestNeighbor` - Fastest resampling filter, no antialiasing.

The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using the `ResampleFilter` struct (see the sketch below).
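A minimal sketch of such a custom filter, assuming only the `ResampleFilter` fields that `resize.go` in this commit actually reads (`Support` and `Kernel`); the tent kernel and file paths here are illustrative, not something the library ships:

```go
package main

import (
	"math"

	"github.com/disintegration/imaging"
)

// triangle is a hypothetical custom filter: a tent (linear) kernel with
// support 1.0, equivalent in spirit to imaging.Linear.
var triangle = imaging.ResampleFilter{
	Support: 1.0,
	Kernel: func(x float64) float64 {
		x = math.Abs(x)
		if x < 1.0 {
			return 1.0 - x
		}
		return 0
	},
}

func main() {
	src, _ := imaging.Open("in.png") // illustrative path
	dst := imaging.Resize(src, 480, 0, triangle)
	_ = imaging.Save(dst, "out.png")
}
```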
**Resampling filters comparison**

Original image:

*(sample images are not included in this vendored copy)*

The same image resized from 600x400px to 150x100px using different resampling filters.
From faster (lower quality) to slower (higher quality):

Filter | Resize result
--------------------------|---------------------------------------------
`imaging.NearestNeighbor` | (image)
`imaging.Linear` | (image)
`imaging.CatmullRom` | (image)
`imaging.Lanczos` | (image)

### Gaussian Blur

```go
dstImage := imaging.Blur(srcImage, 0.5)
```

The sigma parameter controls the strength of the blurring effect.

Original image | Sigma = 0.5 | Sigma = 1.5
---------------|-------------|------------
(image) | (image) | (image)

### Sharpening

```go
dstImage := imaging.Sharpen(srcImage, 0.5)
```

`Sharpen` uses a Gaussian function internally. The sigma parameter controls the strength of the sharpening effect.

Original image | Sigma = 0.5 | Sigma = 1.5
---------------|-------------|------------
(image) | (image) | (image)

### Gamma correction

```go
dstImage := imaging.AdjustGamma(srcImage, 0.75)
```

Original image | Gamma = 0.75 | Gamma = 1.25
---------------|--------------|-------------
(image) | (image) | (image)

### Contrast adjustment

```go
dstImage := imaging.AdjustContrast(srcImage, 20)
```

Original image | Contrast = 15 | Contrast = -15
---------------|---------------|---------------
(image) | (image) | (image)

### Brightness adjustment

```go
dstImage := imaging.AdjustBrightness(srcImage, 20)
```

Original image | Brightness = 10 | Brightness = -10
---------------|-----------------|-----------------
(image) | (image) | (image)

### Saturation adjustment

```go
dstImage := imaging.AdjustSaturation(srcImage, 20)
```

Original image | Saturation = 30 | Saturation = -30
---------------|-----------------|-----------------
(image) | (image) | (image)

## FAQ

### Incorrect image orientation after processing (e.g. an image appears rotated after resizing)

Most probably, the given image contains the EXIF orientation tag.
The standard `image/*` packages do not support loading and saving
this kind of information. To fix the issue, try opening images with
the `AutoOrientation` decode option. If this option is set to `true`,
the image orientation is changed after decoding, according to the
orientation tag (if present). Here's an example:

```go
img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
```

### What's the difference between `imaging` and `gift` packages?

[imaging](https://github.com/disintegration/imaging)
is designed to be a lightweight and simple image manipulation package.
It provides basic image processing functions and a few helper functions
such as `Open` and `Save`. It consistently returns the `*image.NRGBA` image
type (8 bits per channel, RGBA).

[gift](https://github.com/disintegration/gift)
supports more advanced image processing, for example, sRGB/Linear color
space conversions. It also supports different output image types
(e.g. 16 bits per channel) and provides an easy-to-use API for chaining
multiple processing steps together.

## Example code

```go
package main

import (
	"image"
	"image/color"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	// Open a test image.
	src, err := imaging.Open("testdata/flowers.png")
	if err != nil {
		log.Fatalf("failed to open image: %v", err)
	}

	// Crop the original image to 300x300px size using the center anchor.
	src = imaging.CropAnchor(src, 300, 300, imaging.Center)

	// Resize the cropped image to width = 200px preserving the aspect ratio.
	src = imaging.Resize(src, 200, 0, imaging.Lanczos)

	// Create a blurred version of the image.
	img1 := imaging.Blur(src, 5)

	// Create a grayscale version of the image with higher contrast and sharpness.
	img2 := imaging.Grayscale(src)
	img2 = imaging.AdjustContrast(img2, 20)
	img2 = imaging.Sharpen(img2, 2)

	// Create an inverted version of the image.
	img3 := imaging.Invert(src)

	// Create an embossed version of the image using a convolution filter.
	img4 := imaging.Convolve3x3(
		src,
		[9]float64{
			-1, -1, 0,
			-1, 1, 1,
			0, 1, 1,
		},
		nil,
	)

	// Create a new image and paste the four produced images into it.
	dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0})
	dst = imaging.Paste(dst, img1, image.Pt(0, 0))
	dst = imaging.Paste(dst, img2, image.Pt(0, 200))
	dst = imaging.Paste(dst, img3, image.Pt(200, 0))
	dst = imaging.Paste(dst, img4, image.Pt(200, 200))

	// Save the resulting image as JPEG.
	err = imaging.Save(dst, "testdata/out_example.jpg")
	if err != nil {
		log.Fatalf("failed to save image: %v", err)
	}
}
```

Output:

*(output image omitted)*
253
vendor/github.com/disintegration/imaging/adjust.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
package imaging

import (
    "image"
    "image/color"
    "math"
)

// Grayscale produces a grayscale version of the image.
func Grayscale(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    parallel(0, src.h, func(ys <-chan int) {
        for y := range ys {
            i := y * dst.Stride
            src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
            for x := 0; x < src.w; x++ {
                d := dst.Pix[i : i+3 : i+3]
                r := d[0]
                g := d[1]
                b := d[2]
                f := 0.299*float64(r) + 0.587*float64(g) + 0.114*float64(b)
                y := uint8(f + 0.5)
                d[0] = y
                d[1] = y
                d[2] = y
                i += 4
            }
        }
    })
    return dst
}

// Invert produces an inverted (negated) version of the image.
func Invert(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    parallel(0, src.h, func(ys <-chan int) {
        for y := range ys {
            i := y * dst.Stride
            src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
            for x := 0; x < src.w; x++ {
                d := dst.Pix[i : i+3 : i+3]
                d[0] = 255 - d[0]
                d[1] = 255 - d[1]
                d[2] = 255 - d[2]
                i += 4
            }
        }
    })
    return dst
}

// AdjustSaturation changes the saturation of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in the range (-100, 100).
// The percentage = 0 gives the original image.
// The percentage = 100 gives the image with the saturation value doubled for each pixel.
// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
//
// Examples:
//  dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
//  dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
//
func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
    percentage = math.Min(math.Max(percentage, -100), 100)
    multiplier := 1 + percentage/100

    return AdjustFunc(img, func(c color.NRGBA) color.NRGBA {
        h, s, l := rgbToHSL(c.R, c.G, c.B)
        s *= multiplier
        if s > 1 {
            s = 1
        }
        r, g, b := hslToRGB(h, s, l)
        return color.NRGBA{r, g, b, c.A}
    })
}

// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives solid gray image.
//
// Examples:
//
//  dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
//  dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%.
//
func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
    percentage = math.Min(math.Max(percentage, -100.0), 100.0)
    lut := make([]uint8, 256)

    v := (100.0 + percentage) / 100.0
    for i := 0; i < 256; i++ {
        switch {
        case 0 <= v && v <= 1:
            lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0)
        case 1 < v && v < 2:
            lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0)
        default:
            lut[i] = uint8(float64(i)/255.0+0.5) * 255
        }
    }

    return adjustLUT(img, lut)
}

// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image.
//
// Examples:
//
//  dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
//  dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%.
//
func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
    percentage = math.Min(math.Max(percentage, -100.0), 100.0)
    lut := make([]uint8, 256)

    shift := 255.0 * percentage / 100.0
    for i := 0; i < 256; i++ {
        lut[i] = clamp(float64(i) + shift)
    }

    return adjustLUT(img, lut)
}

// AdjustGamma performs a gamma correction on the image and returns the adjusted image.
// Gamma parameter must be positive. Gamma = 1.0 gives the original image.
// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it.
//
// Example:
//
//  dstImage = imaging.AdjustGamma(srcImage, 0.7)
//
func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
    e := 1.0 / math.Max(gamma, 0.0001)
    lut := make([]uint8, 256)

    for i := 0; i < 256; i++ {
        lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0)
    }

    return adjustLUT(img, lut)
}

// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image.
// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail.
// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5.
// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10).
// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased.
//
// Examples:
//
//  dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast.
//  dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
//
func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
    if factor == 0 {
        return Clone(img)
    }

    lut := make([]uint8, 256)
    a := math.Min(math.Max(midpoint, 0.0), 1.0)
    b := math.Abs(factor)
    sig0 := sigmoid(a, b, 0)
    sig1 := sigmoid(a, b, 1)
    e := 1.0e-6

    if factor > 0 {
        for i := 0; i < 256; i++ {
            x := float64(i) / 255.0
            sigX := sigmoid(a, b, x)
            f := (sigX - sig0) / (sig1 - sig0)
            lut[i] = clamp(f * 255.0)
        }
    } else {
        for i := 0; i < 256; i++ {
            x := float64(i) / 255.0
            arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e)
            f := a - math.Log(1.0/arg-1.0)/b
            lut[i] = clamp(f * 255.0)
        }
    }

    return adjustLUT(img, lut)
}

func sigmoid(a, b, x float64) float64 {
    return 1 / (1 + math.Exp(b*(a-x)))
}

// adjustLUT applies the given lookup table to the colors of the image.
func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    lut = lut[0:256]
    parallel(0, src.h, func(ys <-chan int) {
        for y := range ys {
            i := y * dst.Stride
            src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
            for x := 0; x < src.w; x++ {
                d := dst.Pix[i : i+3 : i+3]
                d[0] = lut[d[0]]
                d[1] = lut[d[1]]
                d[2] = lut[d[2]]
                i += 4
            }
        }
    })
    return dst
}

// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image.
//
// Example:
//
//  dstImage = imaging.AdjustFunc(
//      srcImage,
//      func(c color.NRGBA) color.NRGBA {
//          // Shift the red channel by 16.
//          r := int(c.R) + 16
//          if r > 255 {
//              r = 255
//          }
//          return color.NRGBA{uint8(r), c.G, c.B, c.A}
//      }
//  )
//
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    parallel(0, src.h, func(ys <-chan int) {
        for y := range ys {
            i := y * dst.Stride
            src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
            for x := 0; x < src.w; x++ {
                d := dst.Pix[i : i+4 : i+4]
                r := d[0]
                g := d[1]
                b := d[2]
                a := d[3]
                c := fn(color.NRGBA{r, g, b, a})
                d[0] = c.R
                d[1] = c.G
                d[2] = c.B
                d[3] = c.A
                i += 4
            }
        }
    })
    return dst
}
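`AdjustFunc` above is the generic per-pixel hook the other adjustments build on. A minimal sketch of using it for a sepia tone; the coefficients are the standard sepia matrix and the `clampF` helper is hypothetical (the library's own `clamp` is unexported):

```go
package main

import (
	"image/color"

	"github.com/disintegration/imaging"
)

// clampF is a hypothetical helper that rounds and clamps to [0, 255].
func clampF(v float64) uint8 {
	if v > 255 {
		return 255
	}
	if v < 0 {
		return 0
	}
	return uint8(v + 0.5)
}

func main() {
	src, _ := imaging.Open("in.jpg") // illustrative path
	sepia := imaging.AdjustFunc(src, func(c color.NRGBA) color.NRGBA {
		// Apply the sepia matrix to each pixel, preserving alpha.
		r := 0.393*float64(c.R) + 0.769*float64(c.G) + 0.189*float64(c.B)
		g := 0.349*float64(c.R) + 0.686*float64(c.G) + 0.168*float64(c.B)
		b := 0.272*float64(c.R) + 0.534*float64(c.G) + 0.131*float64(c.B)
		return color.NRGBA{clampF(r), clampF(g), clampF(b), c.A}
	})
	_ = imaging.Save(sepia, "sepia.jpg")
}
```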
148
vendor/github.com/disintegration/imaging/convolution.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
package imaging

import (
    "image"
)

// ConvolveOptions are convolution parameters.
type ConvolveOptions struct {
    // If Normalize is true the kernel is normalized before convolution.
    Normalize bool

    // If Abs is true the absolute value of each color channel is taken after convolution.
    Abs bool

    // Bias is added to each color channel value after convolution.
    Bias int
}

// Convolve3x3 convolves the image with the specified 3x3 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA {
    return convolve(img, kernel[:], options)
}

// Convolve5x5 convolves the image with the specified 5x5 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA {
    return convolve(img, kernel[:], options)
}

func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA {
    src := toNRGBA(img)
    w := src.Bounds().Max.X
    h := src.Bounds().Max.Y
    dst := image.NewNRGBA(image.Rect(0, 0, w, h))

    if w < 1 || h < 1 {
        return dst
    }

    if options == nil {
        options = &ConvolveOptions{}
    }

    if options.Normalize {
        normalizeKernel(kernel)
    }

    type coef struct {
        x, y int
        k    float64
    }
    var coefs []coef
    var m int

    switch len(kernel) {
    case 9:
        m = 1
    case 25:
        m = 2
    }

    i := 0
    for y := -m; y <= m; y++ {
        for x := -m; x <= m; x++ {
            if kernel[i] != 0 {
                coefs = append(coefs, coef{x: x, y: y, k: kernel[i]})
            }
            i++
        }
    }

    parallel(0, h, func(ys <-chan int) {
        for y := range ys {
            for x := 0; x < w; x++ {
                var r, g, b float64
                for _, c := range coefs {
                    ix := x + c.x
                    if ix < 0 {
                        ix = 0
                    } else if ix >= w {
                        ix = w - 1
                    }

                    iy := y + c.y
                    if iy < 0 {
                        iy = 0
                    } else if iy >= h {
                        iy = h - 1
                    }

                    off := iy*src.Stride + ix*4
                    s := src.Pix[off : off+3 : off+3]
                    r += float64(s[0]) * c.k
                    g += float64(s[1]) * c.k
                    b += float64(s[2]) * c.k
                }

                if options.Abs {
                    if r < 0 {
                        r = -r
                    }
                    if g < 0 {
                        g = -g
                    }
                    if b < 0 {
                        b = -b
                    }
                }

                if options.Bias != 0 {
                    r += float64(options.Bias)
                    g += float64(options.Bias)
                    b += float64(options.Bias)
                }

                srcOff := y*src.Stride + x*4
                dstOff := y*dst.Stride + x*4
                d := dst.Pix[dstOff : dstOff+4 : dstOff+4]
                d[0] = clamp(r)
                d[1] = clamp(g)
                d[2] = clamp(b)
                d[3] = src.Pix[srcOff+3]
            }
        }
    })

    return dst
}

func normalizeKernel(kernel []float64) {
    var sum, sumpos float64
    for i := range kernel {
        sum += kernel[i]
        if kernel[i] > 0 {
            sumpos += kernel[i]
        }
    }
    if sum != 0 {
        for i := range kernel {
            kernel[i] /= sum
        }
    } else if sumpos != 0 {
        for i := range kernel {
            kernel[i] /= sumpos
        }
    }
}
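A short sketch of `Convolve3x3` with the options defined above; the Laplacian edge-detection kernel is a standard choice for illustration, not something this commit prescribes, and the file paths are placeholders:

```go
package main

import "github.com/disintegration/imaging"

func main() {
	src, _ := imaging.Open("in.png") // illustrative path

	// 3x3 Laplacian kernel; Abs keeps negative edge responses visible
	// instead of letting them clamp to black.
	edges := imaging.Convolve3x3(
		src,
		[9]float64{
			0, -1, 0,
			-1, 4, -1,
			0, -1, 0,
		},
		&imaging.ConvolveOptions{Abs: true},
	)
	_ = imaging.Save(edges, "edges.png")
}
```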
7
vendor/github.com/disintegration/imaging/doc.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
/*
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).

All the image processing functions provided by the package accept any image type that implements image.Image interface
as an input, and return a new image of *image.NRGBA type (32bit RGBA colors, non-premultiplied alpha).
*/
package imaging
169
vendor/github.com/disintegration/imaging/effects.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
package imaging

import (
    "image"
    "math"
)

func gaussianBlurKernel(x, sigma float64) float64 {
    return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi))
}

// Blur produces a blurred version of the image using a Gaussian function.
// Sigma parameter must be positive and indicates how much the image will be blurred.
//
// Example:
//
//  dstImage := imaging.Blur(srcImage, 3.5)
//
func Blur(img image.Image, sigma float64) *image.NRGBA {
    if sigma <= 0 {
        return Clone(img)
    }

    radius := int(math.Ceil(sigma * 3.0))
    kernel := make([]float64, radius+1)

    for i := 0; i <= radius; i++ {
        kernel[i] = gaussianBlurKernel(float64(i), sigma)
    }

    return blurVertical(blurHorizontal(img, kernel), kernel)
}

func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    radius := len(kernel) - 1

    parallel(0, src.h, func(ys <-chan int) {
        scanLine := make([]uint8, src.w*4)
        scanLineF := make([]float64, len(scanLine))
        for y := range ys {
            src.scan(0, y, src.w, y+1, scanLine)
            for i, v := range scanLine {
                scanLineF[i] = float64(v)
            }
            for x := 0; x < src.w; x++ {
                min := x - radius
                if min < 0 {
                    min = 0
                }
                max := x + radius
                if max > src.w-1 {
                    max = src.w - 1
                }
                var r, g, b, a, wsum float64
                for ix := min; ix <= max; ix++ {
                    i := ix * 4
                    weight := kernel[absint(x-ix)]
                    wsum += weight
                    s := scanLineF[i : i+4 : i+4]
                    wa := s[3] * weight
                    r += s[0] * wa
                    g += s[1] * wa
                    b += s[2] * wa
                    a += wa
                }
                if a != 0 {
                    aInv := 1 / a
                    j := y*dst.Stride + x*4
                    d := dst.Pix[j : j+4 : j+4]
                    d[0] = clamp(r * aInv)
                    d[1] = clamp(g * aInv)
                    d[2] = clamp(b * aInv)
                    d[3] = clamp(a / wsum)
                }
            }
        }
    })

    return dst
}

func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    radius := len(kernel) - 1

    parallel(0, src.w, func(xs <-chan int) {
        scanLine := make([]uint8, src.h*4)
        scanLineF := make([]float64, len(scanLine))
        for x := range xs {
            src.scan(x, 0, x+1, src.h, scanLine)
            for i, v := range scanLine {
                scanLineF[i] = float64(v)
            }
            for y := 0; y < src.h; y++ {
                min := y - radius
                if min < 0 {
                    min = 0
                }
                max := y + radius
                if max > src.h-1 {
                    max = src.h - 1
                }
                var r, g, b, a, wsum float64
                for iy := min; iy <= max; iy++ {
                    i := iy * 4
                    weight := kernel[absint(y-iy)]
                    wsum += weight
                    s := scanLineF[i : i+4 : i+4]
                    wa := s[3] * weight
                    r += s[0] * wa
                    g += s[1] * wa
                    b += s[2] * wa
                    a += wa
                }
                if a != 0 {
                    aInv := 1 / a
                    j := y*dst.Stride + x*4
                    d := dst.Pix[j : j+4 : j+4]
                    d[0] = clamp(r * aInv)
                    d[1] = clamp(g * aInv)
                    d[2] = clamp(b * aInv)
                    d[3] = clamp(a / wsum)
                }
            }
        }
    })

    return dst
}

// Sharpen produces a sharpened version of the image.
// Sigma parameter must be positive and indicates how much the image will be sharpened.
//
// Example:
//
//  dstImage := imaging.Sharpen(srcImage, 3.5)
//
func Sharpen(img image.Image, sigma float64) *image.NRGBA {
    if sigma <= 0 {
        return Clone(img)
    }

    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    blurred := Blur(img, sigma)

    parallel(0, src.h, func(ys <-chan int) {
        scanLine := make([]uint8, src.w*4)
        for y := range ys {
            src.scan(0, y, src.w, y+1, scanLine)
            j := y * dst.Stride
            for i := 0; i < src.w*4; i++ {
                val := int(scanLine[i])<<1 - int(blurred.Pix[j])
                if val < 0 {
                    val = 0
                } else if val > 0xff {
                    val = 0xff
                }
                dst.Pix[j] = uint8(val)
                j++
            }
        }
    })

    return dst
}
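Note that `Blur` above is separable: it builds a one-sided Gaussian kernel of radius `ceil(3*sigma)` and applies it horizontally, then vertically. A quick usage sketch (the sigma value and paths are illustrative):

```go
package main

import "github.com/disintegration/imaging"

func main() {
	src, _ := imaging.Open("in.png") // illustrative path
	// sigma = 1.5 gives kernel radius ceil(3*1.5) = 5, i.e. an
	// 11-tap window per pass (two passes: horizontal, then vertical).
	blurred := imaging.Blur(src, 1.5)
	_ = imaging.Save(blurred, "blurred.png")
}
```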
52
vendor/github.com/disintegration/imaging/histogram.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
package imaging

import (
    "image"
    "sync"
)

// Histogram returns a normalized histogram of an image.
//
// Resulting histogram is represented as an array of 256 floats, where
// histogram[i] is a probability of a pixel being of a particular luminance i.
func Histogram(img image.Image) [256]float64 {
    var mu sync.Mutex
    var histogram [256]float64
    var total float64

    src := newScanner(img)
    if src.w == 0 || src.h == 0 {
        return histogram
    }

    parallel(0, src.h, func(ys <-chan int) {
        var tmpHistogram [256]float64
        var tmpTotal float64
        scanLine := make([]uint8, src.w*4)
        for y := range ys {
            src.scan(0, y, src.w, y+1, scanLine)
            i := 0
            for x := 0; x < src.w; x++ {
                s := scanLine[i : i+3 : i+3]
                r := s[0]
                g := s[1]
                b := s[2]
                y := 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b)
                tmpHistogram[int(y+0.5)]++
                tmpTotal++
                i += 4
            }
        }
        mu.Lock()
        for i := 0; i < 256; i++ {
            histogram[i] += tmpHistogram[i]
        }
        total += tmpTotal
        mu.Unlock()
    })

    for i := 0; i < 256; i++ {
        histogram[i] = histogram[i] / total
    }
    return histogram
}
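Because `Histogram` returns a probability distribution over 256 luminance levels, summary statistics fall out directly. A minimal sketch computing mean luminance; the file name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/disintegration/imaging"
)

func main() {
	img, _ := imaging.Open("in.jpg") // illustrative path
	hist := imaging.Histogram(img)

	// Mean luminance is the expectation: sum of level * probability.
	var mean float64
	for level, p := range hist {
		mean += float64(level) * p
	}
	fmt.Printf("mean luminance: %.1f / 255\n", mean)
}
```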
444
vendor/github.com/disintegration/imaging/io.go
generated
vendored
Normal file
@ -0,0 +1,444 @@
package imaging

import (
    "encoding/binary"
    "errors"
    "image"
    "image/draw"
    "image/gif"
    "image/jpeg"
    "image/png"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "golang.org/x/image/bmp"
    "golang.org/x/image/tiff"
)

type fileSystem interface {
    Create(string) (io.WriteCloser, error)
    Open(string) (io.ReadCloser, error)
}

type localFS struct{}

func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
func (localFS) Open(name string) (io.ReadCloser, error)    { return os.Open(name) }

var fs fileSystem = localFS{}

type decodeConfig struct {
    autoOrientation bool
}

var defaultDecodeConfig = decodeConfig{
    autoOrientation: false,
}

// DecodeOption sets an optional parameter for the Decode and Open functions.
type DecodeOption func(*decodeConfig)

// AutoOrientation returns a DecodeOption that sets the auto-orientation mode.
// If auto-orientation is enabled, the image will be transformed after decoding
// according to the EXIF orientation tag (if present). By default it's disabled.
func AutoOrientation(enabled bool) DecodeOption {
    return func(c *decodeConfig) {
        c.autoOrientation = enabled
    }
}

// Decode reads an image from r.
func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
    cfg := defaultDecodeConfig
    for _, option := range opts {
        option(&cfg)
    }

    if !cfg.autoOrientation {
        img, _, err := image.Decode(r)
        return img, err
    }

    var orient orientation
    pr, pw := io.Pipe()
    r = io.TeeReader(r, pw)
    done := make(chan struct{})
    go func() {
        defer close(done)
        orient = readOrientation(pr)
        io.Copy(ioutil.Discard, pr)
    }()

    img, _, err := image.Decode(r)
    pw.Close()
    <-done
    if err != nil {
        return nil, err
    }

    return fixOrientation(img, orient), nil
}

// Open loads an image from file.
//
// Examples:
//
//  // Load an image from file.
//  img, err := imaging.Open("test.jpg")
//
//  // Load an image and transform it depending on the EXIF orientation tag (if present).
//  img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
//
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
    file, err := fs.Open(filename)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    return Decode(file, opts...)
}

// Format is an image file format.
type Format int

// Image file formats.
const (
    JPEG Format = iota
    PNG
    GIF
    TIFF
    BMP
)

var formatExts = map[string]Format{
    "jpg":  JPEG,
    "jpeg": JPEG,
    "png":  PNG,
    "gif":  GIF,
    "tif":  TIFF,
    "tiff": TIFF,
    "bmp":  BMP,
}

var formatNames = map[Format]string{
    JPEG: "JPEG",
    PNG:  "PNG",
    GIF:  "GIF",
    TIFF: "TIFF",
    BMP:  "BMP",
}

func (f Format) String() string {
    return formatNames[f]
}

// ErrUnsupportedFormat means the given image format is not supported.
var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")

// FormatFromExtension parses image format from filename extension:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
func FormatFromExtension(ext string) (Format, error) {
    if f, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
        return f, nil
    }
    return -1, ErrUnsupportedFormat
}

// FormatFromFilename parses image format from filename:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
func FormatFromFilename(filename string) (Format, error) {
    ext := filepath.Ext(filename)
    return FormatFromExtension(ext)
}

type encodeConfig struct {
    jpegQuality         int
    gifNumColors        int
    gifQuantizer        draw.Quantizer
    gifDrawer           draw.Drawer
    pngCompressionLevel png.CompressionLevel
}

var defaultEncodeConfig = encodeConfig{
    jpegQuality:         95,
    gifNumColors:        256,
    gifQuantizer:        nil,
    gifDrawer:           nil,
    pngCompressionLevel: png.DefaultCompression,
}

// EncodeOption sets an optional parameter for the Encode and Save functions.
type EncodeOption func(*encodeConfig)

// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
func JPEGQuality(quality int) EncodeOption {
    return func(c *encodeConfig) {
        c.jpegQuality = quality
    }
}

// GIFNumColors returns an EncodeOption that sets the maximum number of colors
// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
func GIFNumColors(numColors int) EncodeOption {
    return func(c *encodeConfig) {
        c.gifNumColors = numColors
    }
}

// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
// a palette of the GIF-encoded image.
func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
    return func(c *encodeConfig) {
        c.gifQuantizer = quantizer
    }
}

// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
// the source image to the desired palette of the GIF-encoded image.
func GIFDrawer(drawer draw.Drawer) EncodeOption {
    return func(c *encodeConfig) {
        c.gifDrawer = drawer
    }
}

// PNGCompressionLevel returns an EncodeOption that sets the compression level
// of the PNG-encoded image. Default is png.DefaultCompression.
func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
    return func(c *encodeConfig) {
        c.pngCompressionLevel = level
    }
}

// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
    cfg := defaultEncodeConfig
    for _, option := range opts {
        option(&cfg)
    }

    switch format {
    case JPEG:
        if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() {
            rgba := &image.RGBA{
                Pix:    nrgba.Pix,
                Stride: nrgba.Stride,
                Rect:   nrgba.Rect,
            }
            return jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
        }
        return jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})

    case PNG:
        encoder := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
        return encoder.Encode(w, img)

    case GIF:
        return gif.Encode(w, img, &gif.Options{
            NumColors: cfg.gifNumColors,
            Quantizer: cfg.gifQuantizer,
            Drawer:    cfg.gifDrawer,
        })

    case TIFF:
        return tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})

    case BMP:
        return bmp.Encode(w, img)
    }

    return ErrUnsupportedFormat
}

// Save saves the image to file with the specified filename.
// The format is determined from the filename extension:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
//
// Examples:
//
//  // Save the image as PNG.
//  err := imaging.Save(img, "out.png")
//
//  // Save the image as JPEG with optional quality parameter set to 80.
//  err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
//
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
    f, err := FormatFromFilename(filename)
    if err != nil {
        return err
    }
    file, err := fs.Create(filename)
    if err != nil {
        return err
    }
    err = Encode(file, img, f, opts...)
    errc := file.Close()
    if err == nil {
        err = errc
    }
    return err
}

// orientation is an EXIF flag that specifies the transformation
// that should be applied to image to display it correctly.
type orientation int

const (
    orientationUnspecified = 0
    orientationNormal      = 1
    orientationFlipH       = 2
    orientationRotate180   = 3
    orientationFlipV       = 4
    orientationTranspose   = 5
    orientationRotate270   = 6
    orientationTransverse  = 7
    orientationRotate90    = 8
)

// readOrientation tries to read the orientation EXIF flag from image data in r.
// If the EXIF data block is not found or the orientation flag is not found
// or any other error occurs while reading the data, it returns the
// orientationUnspecified (0) value.
func readOrientation(r io.Reader) orientation {
    const (
        markerSOI      = 0xffd8
        markerAPP1     = 0xffe1
        exifHeader     = 0x45786966
        byteOrderBE    = 0x4d4d
        byteOrderLE    = 0x4949
        orientationTag = 0x0112
    )

    // Check if JPEG SOI marker is present.
    var soi uint16
    if err := binary.Read(r, binary.BigEndian, &soi); err != nil {
        return orientationUnspecified
    }
    if soi != markerSOI {
        return orientationUnspecified // Missing JPEG SOI marker.
    }

    // Find JPEG APP1 marker.
    for {
        var marker, size uint16
        if err := binary.Read(r, binary.BigEndian, &marker); err != nil {
            return orientationUnspecified
        }
        if err := binary.Read(r, binary.BigEndian, &size); err != nil {
            return orientationUnspecified
        }
        if marker>>8 != 0xff {
            return orientationUnspecified // Invalid JPEG marker.
        }
        if marker == markerAPP1 {
            break
        }
        if size < 2 {
            return orientationUnspecified // Invalid block size.
        }
        if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil {
            return orientationUnspecified
        }
    }

    // Check if EXIF header is present.
    var header uint32
    if err := binary.Read(r, binary.BigEndian, &header); err != nil {
        return orientationUnspecified
    }
    if header != exifHeader {
        return orientationUnspecified
    }
    if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
        return orientationUnspecified
    }

    // Read byte order information.
    var (
        byteOrderTag uint16
        byteOrder    binary.ByteOrder
    )
    if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil {
        return orientationUnspecified
    }
    switch byteOrderTag {
    case byteOrderBE:
        byteOrder = binary.BigEndian
    case byteOrderLE:
        byteOrder = binary.LittleEndian
    default:
        return orientationUnspecified // Invalid byte order flag.
    }
    if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
        return orientationUnspecified
    }

    // Skip the EXIF offset.
    var offset uint32
    if err := binary.Read(r, byteOrder, &offset); err != nil {
        return orientationUnspecified
    }
    if offset < 8 {
        return orientationUnspecified // Invalid offset value.
    }
    if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil {
        return orientationUnspecified
    }

    // Read the number of tags.
    var numTags uint16
    if err := binary.Read(r, byteOrder, &numTags); err != nil {
        return orientationUnspecified
    }

    // Find the orientation tag.
    for i := 0; i < int(numTags); i++ {
        var tag uint16
        if err := binary.Read(r, byteOrder, &tag); err != nil {
            return orientationUnspecified
        }
        if tag != orientationTag {
            if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil {
                return orientationUnspecified
            }
            continue
        }
        if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil {
            return orientationUnspecified
        }
        var val uint16
        if err := binary.Read(r, byteOrder, &val); err != nil {
            return orientationUnspecified
        }
        if val < 1 || val > 8 {
            return orientationUnspecified // Invalid tag value.
        }
        return orientation(val)
    }
    return orientationUnspecified // Missing orientation tag.
}

// fixOrientation applies a transform to img corresponding to the given orientation flag.
func fixOrientation(img image.Image, o orientation) image.Image {
    switch o {
    case orientationNormal:
    case orientationFlipH:
        img = FlipH(img)
    case orientationFlipV:
        img = FlipV(img)
    case orientationRotate90:
        img = Rotate90(img)
    case orientationRotate180:
        img = Rotate180(img)
    case orientationRotate270:
        img = Rotate270(img)
    case orientationTranspose:
        img = Transpose(img)
    case orientationTransverse:
        img = Transverse(img)
    }
    return img
}
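`Encode` above writes to any `io.Writer`, so in-memory encoding needs no temp file. A minimal sketch producing JPEG bytes (for example, to persist a thumbnail in a database); the quality value and input path are illustrative:

```go
package main

import (
	"bytes"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	img, err := imaging.Open("in.png") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// Encode into a buffer instead of a file.
	var buf bytes.Buffer
	if err := imaging.Encode(&buf, img, imaging.JPEG, imaging.JPEGQuality(85)); err != nil {
		log.Fatal(err)
	}
	jpegBytes := buf.Bytes() // ready to store or send
	log.Printf("encoded %d bytes", len(jpegBytes))
}
```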
595
vendor/github.com/disintegration/imaging/resize.go
generated
vendored
Normal file
@ -0,0 +1,595 @@
package imaging

import (
    "image"
    "math"
)

type indexWeight struct {
    index  int
    weight float64
}

func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight {
    du := float64(srcSize) / float64(dstSize)
    scale := du
    if scale < 1.0 {
        scale = 1.0
    }
    ru := math.Ceil(scale * filter.Support)

    out := make([][]indexWeight, dstSize)
    tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2)

    for v := 0; v < dstSize; v++ {
        fu := (float64(v)+0.5)*du - 0.5

        begin := int(math.Ceil(fu - ru))
        if begin < 0 {
            begin = 0
        }
        end := int(math.Floor(fu + ru))
        if end > srcSize-1 {
            end = srcSize - 1
        }

        var sum float64
        for u := begin; u <= end; u++ {
            w := filter.Kernel((float64(u) - fu) / scale)
            if w != 0 {
                sum += w
                tmp = append(tmp, indexWeight{index: u, weight: w})
            }
        }
        if sum != 0 {
            for i := range tmp {
                tmp[i].weight /= sum
            }
        }

        out[v] = tmp
        tmp = tmp[len(tmp):]
    }

    return out
}

// Resize resizes the image to the specified width and height using the specified resampling
// filter and returns the transformed image. If one of width or height is 0, the image aspect
// ratio is preserved.
//
// Example:
//
//  dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
//
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
    dstW, dstH := width, height
    if dstW < 0 || dstH < 0 {
        return &image.NRGBA{}
    }
    if dstW == 0 && dstH == 0 {
        return &image.NRGBA{}
    }

    srcW := img.Bounds().Dx()
    srcH := img.Bounds().Dy()
    if srcW <= 0 || srcH <= 0 {
        return &image.NRGBA{}
    }

    // If new width or height is 0 then preserve aspect ratio, minimum 1px.
    if dstW == 0 {
        tmpW := float64(dstH) * float64(srcW) / float64(srcH)
        dstW = int(math.Max(1.0, math.Floor(tmpW+0.5)))
    }
    if dstH == 0 {
        tmpH := float64(dstW) * float64(srcH) / float64(srcW)
        dstH = int(math.Max(1.0, math.Floor(tmpH+0.5)))
    }

    if filter.Support <= 0 {
        // Nearest-neighbor special case.
        return resizeNearest(img, dstW, dstH)
    }

    if srcW != dstW && srcH != dstH {
        return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter)
    }
    if srcW != dstW {
        return resizeHorizontal(img, dstW, filter)
    }
    if srcH != dstH {
        return resizeVertical(img, dstH, filter)
    }
    return Clone(img)
}

func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, width, src.h))
    weights := precomputeWeights(width, src.w, filter)
    parallel(0, src.h, func(ys <-chan int) {
        scanLine := make([]uint8, src.w*4)
        for y := range ys {
            src.scan(0, y, src.w, y+1, scanLine)
            j0 := y * dst.Stride
            for x := range weights {
                var r, g, b, a float64
                for _, w := range weights[x] {
                    i := w.index * 4
                    s := scanLine[i : i+4 : i+4]
                    aw := float64(s[3]) * w.weight
                    r += float64(s[0]) * aw
                    g += float64(s[1]) * aw
                    b += float64(s[2]) * aw
                    a += aw
                }
                if a != 0 {
                    aInv := 1 / a
                    j := j0 + x*4
                    d := dst.Pix[j : j+4 : j+4]
                    d[0] = clamp(r * aInv)
                    d[1] = clamp(g * aInv)
                    d[2] = clamp(b * aInv)
                    d[3] = clamp(a)
                }
            }
        }
    })
    return dst
}

func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, height))
    weights := precomputeWeights(height, src.h, filter)
    parallel(0, src.w, func(xs <-chan int) {
        scanLine := make([]uint8, src.h*4)
        for x := range xs {
            src.scan(x, 0, x+1, src.h, scanLine)
            for y := range weights {
                var r, g, b, a float64
                for _, w := range weights[y] {
                    i := w.index * 4
                    s := scanLine[i : i+4 : i+4]
                    aw := float64(s[3]) * w.weight
                    r += float64(s[0]) * aw
                    g += float64(s[1]) * aw
                    b += float64(s[2]) * aw
                    a += aw
                }
                if a != 0 {
                    aInv := 1 / a
                    j := y*dst.Stride + x*4
                    d := dst.Pix[j : j+4 : j+4]
                    d[0] = clamp(r * aInv)
                    d[1] = clamp(g * aInv)
                    d[2] = clamp(b * aInv)
                    d[3] = clamp(a)
                }
            }
        }
    })
    return dst
}

// resizeNearest is a fast nearest-neighbor resize, no filtering.
func resizeNearest(img image.Image, width, height int) *image.NRGBA {
    dst := image.NewNRGBA(image.Rect(0, 0, width, height))
    dx := float64(img.Bounds().Dx()) / float64(width)
    dy := float64(img.Bounds().Dy()) / float64(height)

    if dx > 1 && dy > 1 {
        src := newScanner(img)
        parallel(0, height, func(ys <-chan int) {
            for y := range ys {
                srcY := int((float64(y) + 0.5) * dy)
                dstOff := y * dst.Stride
                for x := 0; x < width; x++ {
                    srcX := int((float64(x) + 0.5) * dx)
                    src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4])
                    dstOff += 4
                }
            }
        })
    } else {
        src := toNRGBA(img)
        parallel(0, height, func(ys <-chan int) {
            for y := range ys {
                srcY := int((float64(y) + 0.5) * dy)
                srcOff0 := srcY * src.Stride
                dstOff := y * dst.Stride
                for x := 0; x < width; x++ {
                    srcX := int((float64(x) + 0.5) * dx)
                    srcOff := srcOff0 + srcX*4
                    copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4])
                    dstOff += 4
                }
            }
        })
    }

    return dst
}

// Fit scales down the image using the specified resample filter to fit the specified
// maximum width and height and returns the transformed image.
//
// Example:
//
//  dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
//
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
    maxW, maxH := width, height

    if maxW <= 0 || maxH <= 0 {
        return &image.NRGBA{}
    }

    srcBounds := img.Bounds()
    srcW := srcBounds.Dx()
    srcH := srcBounds.Dy()

    if srcW <= 0 || srcH <= 0 {
        return &image.NRGBA{}
    }

    if srcW <= maxW && srcH <= maxH {
        return Clone(img)
    }

    srcAspectRatio := float64(srcW) / float64(srcH)
    maxAspectRatio := float64(maxW) / float64(maxH)

    var newW, newH int
    if srcAspectRatio > maxAspectRatio {
        newW = maxW
        newH = int(float64(newW) / srcAspectRatio)
    } else {
        newH = maxH
        newW = int(float64(newH) * srcAspectRatio)
    }

    return Resize(img, newW, newH, filter)
}

// Fill creates an image with the specified dimensions and fills it with the scaled source image.
// To achieve the correct aspect ratio without stretching, the source image will be cropped.
//
// Example:
//
//  dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
//
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
    dstW, dstH := width, height

    if dstW <= 0 || dstH <= 0 {
        return &image.NRGBA{}
    }

    srcBounds := img.Bounds()
    srcW := srcBounds.Dx()
    srcH := srcBounds.Dy()

    if srcW <= 0 || srcH <= 0 {
        return &image.NRGBA{}
    }

    if srcW == dstW && srcH == dstH {
        return Clone(img)
    }

    if srcW >= 100 && srcH >= 100 {
        return cropAndResize(img, dstW, dstH, anchor, filter)
    }
    return resizeAndCrop(img, dstW, dstH, anchor, filter)
}

// cropAndResize crops the image to the smallest possible size that has the required aspect ratio using
// the given anchor point, then scales it to the specified dimensions and returns the transformed image.
//
// This is generally faster than resizing first, but may result in inaccuracies when used on small source images.
func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
|
||||
dstW, dstH := width, height
|
||||
|
||||
srcBounds := img.Bounds()
|
||||
srcW := srcBounds.Dx()
|
||||
srcH := srcBounds.Dy()
|
||||
srcAspectRatio := float64(srcW) / float64(srcH)
|
||||
dstAspectRatio := float64(dstW) / float64(dstH)
|
||||
|
||||
var tmp *image.NRGBA
|
||||
if srcAspectRatio < dstAspectRatio {
|
||||
cropH := float64(srcW) * float64(dstH) / float64(dstW)
|
||||
tmp = CropAnchor(img, srcW, int(math.Max(1, cropH)+0.5), anchor)
|
||||
} else {
|
||||
cropW := float64(srcH) * float64(dstW) / float64(dstH)
|
||||
tmp = CropAnchor(img, int(math.Max(1, cropW)+0.5), srcH, anchor)
|
||||
}
|
||||
|
||||
return Resize(tmp, dstW, dstH, filter)
|
||||
}
|
||||
|
||||
// resizeAndCrop resizes the image to the smallest possible size that will cover the specified dimensions,
|
||||
// crops the resized image to the specified dimensions using the given anchor point and returns
|
||||
// the transformed image.
|
||||
func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
|
||||
dstW, dstH := width, height
|
||||
|
||||
srcBounds := img.Bounds()
|
||||
srcW := srcBounds.Dx()
|
||||
srcH := srcBounds.Dy()
|
||||
srcAspectRatio := float64(srcW) / float64(srcH)
|
||||
dstAspectRatio := float64(dstW) / float64(dstH)
|
||||
|
||||
var tmp *image.NRGBA
|
||||
if srcAspectRatio < dstAspectRatio {
|
||||
tmp = Resize(img, dstW, 0, filter)
|
||||
} else {
|
||||
tmp = Resize(img, 0, dstH, filter)
|
||||
}
|
||||
|
||||
return CropAnchor(tmp, dstW, dstH, anchor)
|
||||
}
|
||||
|
||||
// Thumbnail scales the image up or down using the specified resample filter, crops it
|
||||
// to the specified width and hight and returns the transformed image.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
|
||||
//
|
||||
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
|
||||
return Fill(img, width, height, Center, filter)
|
||||
}
|
||||
|
||||
// ResampleFilter specifies a resampling filter to be used for image resizing.
|
||||
//
|
||||
// General filter recommendations:
|
||||
//
|
||||
// - Lanczos
|
||||
// A high-quality resampling filter for photographic images yielding sharp results.
|
||||
//
|
||||
// - CatmullRom
|
||||
// A sharp cubic filter that is faster than Lanczos filter while providing similar results.
|
||||
//
|
||||
// - MitchellNetravali
|
||||
// A cubic filter that produces smoother results with less ringing artifacts than CatmullRom.
|
||||
//
|
||||
// - Linear
|
||||
// Bilinear resampling filter, produces a smooth output. Faster than cubic filters.
|
||||
//
|
||||
// - Box
|
||||
// Simple and fast averaging filter appropriate for downscaling.
|
||||
// When upscaling it's similar to NearestNeighbor.
|
||||
//
|
||||
// - NearestNeighbor
|
||||
// Fastest resampling filter, no antialiasing.
|
||||
//
|
||||
type ResampleFilter struct {
|
||||
Support float64
|
||||
Kernel func(float64) float64
|
||||
}
|
||||
|
||||
// NearestNeighbor is a nearest-neighbor filter (no anti-aliasing).
|
||||
var NearestNeighbor ResampleFilter
|
||||
|
||||
// Box filter (averaging pixels).
|
||||
var Box ResampleFilter
|
||||
|
||||
// Linear filter.
|
||||
var Linear ResampleFilter
|
||||
|
||||
// Hermite cubic spline filter (BC-spline; B=0; C=0).
|
||||
var Hermite ResampleFilter
|
||||
|
||||
// MitchellNetravali is Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3).
|
||||
var MitchellNetravali ResampleFilter
|
||||
|
||||
// CatmullRom is a Catmull-Rom - sharp cubic filter (BC-spline; B=0; C=0.5).
|
||||
var CatmullRom ResampleFilter
|
||||
|
||||
// BSpline is a smooth cubic filter (BC-spline; B=1; C=0).
|
||||
var BSpline ResampleFilter
|
||||
|
||||
// Gaussian is a Gaussian blurring filter.
|
||||
var Gaussian ResampleFilter
|
||||
|
||||
// Bartlett is a Bartlett-windowed sinc filter (3 lobes).
|
||||
var Bartlett ResampleFilter
|
||||
|
||||
// Lanczos filter (3 lobes).
|
||||
var Lanczos ResampleFilter
|
||||
|
||||
// Hann is a Hann-windowed sinc filter (3 lobes).
|
||||
var Hann ResampleFilter
|
||||
|
||||
// Hamming is a Hamming-windowed sinc filter (3 lobes).
|
||||
var Hamming ResampleFilter
|
||||
|
||||
// Blackman is a Blackman-windowed sinc filter (3 lobes).
|
||||
var Blackman ResampleFilter
|
||||
|
||||
// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes).
|
||||
var Welch ResampleFilter
|
||||
|
||||
// Cosine is a Cosine-windowed sinc filter (3 lobes).
|
||||
var Cosine ResampleFilter
|
||||
|
||||
func bcspline(x, b, c float64) float64 {
|
||||
var y float64
|
||||
x = math.Abs(x)
|
||||
if x < 1.0 {
|
||||
y = ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
|
||||
} else if x < 2.0 {
|
||||
y = ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func sinc(x float64) float64 {
|
||||
if x == 0 {
|
||||
return 1
|
||||
}
|
||||
return math.Sin(math.Pi*x) / (math.Pi * x)
|
||||
}
|
||||
|
||||
func init() {
|
||||
NearestNeighbor = ResampleFilter{
|
||||
Support: 0.0, // special case - not applying the filter
|
||||
}
|
||||
|
||||
Box = ResampleFilter{
|
||||
Support: 0.5,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x <= 0.5 {
|
||||
return 1.0
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Linear = ResampleFilter{
|
||||
Support: 1.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 1.0 {
|
||||
return 1.0 - x
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Hermite = ResampleFilter{
|
||||
Support: 1.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 1.0 {
|
||||
return bcspline(x, 0.0, 0.0)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
MitchellNetravali = ResampleFilter{
|
||||
Support: 2.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 2.0 {
|
||||
return bcspline(x, 1.0/3.0, 1.0/3.0)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
CatmullRom = ResampleFilter{
|
||||
Support: 2.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 2.0 {
|
||||
return bcspline(x, 0.0, 0.5)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
BSpline = ResampleFilter{
|
||||
Support: 2.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 2.0 {
|
||||
return bcspline(x, 1.0, 0.0)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Gaussian = ResampleFilter{
|
||||
Support: 2.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 2.0 {
|
||||
return math.Exp(-2 * x * x)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Bartlett = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * (3.0 - x) / 3.0
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Lanczos = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * sinc(x/3.0)
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Hann = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Hamming = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Blackman = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Welch = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * (1.0 - (x * x / 9.0))
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
|
||||
Cosine = ResampleFilter{
|
||||
Support: 3.0,
|
||||
Kernel: func(x float64) float64 {
|
||||
x = math.Abs(x)
|
||||
if x < 3.0 {
|
||||
return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
|
||||
}
|
||||
return 0
|
||||
},
|
||||
}
|
||||
}
|
||||
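For orientation, a minimal usage sketch of the resize helpers above; the input file name, target width, and filter choice are illustrative, not taken from this commit:

package main

import (
    "image"
    _ "image/jpeg" // register the JPEG decoder for image.Decode
    "log"
    "os"

    "github.com/disintegration/imaging"
)

func main() {
    f, err := os.Open("input.jpg") // hypothetical input file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    src, _, err := image.Decode(f)
    if err != nil {
        log.Fatal(err)
    }

    // A height of 0 preserves the aspect ratio, as documented in Resize above.
    thumb := imaging.Resize(src, 480, 0, imaging.Lanczos)
    _ = thumb
}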
285
vendor/github.com/disintegration/imaging/scanner.go
generated
vendored
Normal file
285
vendor/github.com/disintegration/imaging/scanner.go
generated
vendored
Normal file
|
|
@ -0,0 +1,285 @@
|
|||
package imaging

import (
    "image"
    "image/color"
)

type scanner struct {
    image   image.Image
    w, h    int
    palette []color.NRGBA
}

func newScanner(img image.Image) *scanner {
    s := &scanner{
        image: img,
        w:     img.Bounds().Dx(),
        h:     img.Bounds().Dy(),
    }
    if img, ok := img.(*image.Paletted); ok {
        s.palette = make([]color.NRGBA, len(img.Palette))
        for i := 0; i < len(img.Palette); i++ {
            s.palette[i] = color.NRGBAModel.Convert(img.Palette[i]).(color.NRGBA)
        }
    }
    return s
}

// scan scans the given rectangular region of the image into dst.
func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
    switch img := s.image.(type) {
    case *image.NRGBA:
        size := (x2 - x1) * 4
        j := 0
        i := y1*img.Stride + x1*4
        if size == 4 {
            for y := y1; y < y2; y++ {
                d := dst[j : j+4 : j+4]
                s := img.Pix[i : i+4 : i+4]
                d[0] = s[0]
                d[1] = s[1]
                d[2] = s[2]
                d[3] = s[3]
                j += size
                i += img.Stride
            }
        } else {
            for y := y1; y < y2; y++ {
                copy(dst[j:j+size], img.Pix[i:i+size])
                j += size
                i += img.Stride
            }
        }

    case *image.NRGBA64:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1*8
            for x := x1; x < x2; x++ {
                s := img.Pix[i : i+8 : i+8]
                d := dst[j : j+4 : j+4]
                d[0] = s[0]
                d[1] = s[2]
                d[2] = s[4]
                d[3] = s[6]
                j += 4
                i += 8
            }
        }

    case *image.RGBA:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1*4
            for x := x1; x < x2; x++ {
                d := dst[j : j+4 : j+4]
                a := img.Pix[i+3]
                switch a {
                case 0:
                    d[0] = 0
                    d[1] = 0
                    d[2] = 0
                    d[3] = a
                case 0xff:
                    s := img.Pix[i : i+4 : i+4]
                    d[0] = s[0]
                    d[1] = s[1]
                    d[2] = s[2]
                    d[3] = a
                default:
                    s := img.Pix[i : i+4 : i+4]
                    r16 := uint16(s[0])
                    g16 := uint16(s[1])
                    b16 := uint16(s[2])
                    a16 := uint16(a)
                    d[0] = uint8(r16 * 0xff / a16)
                    d[1] = uint8(g16 * 0xff / a16)
                    d[2] = uint8(b16 * 0xff / a16)
                    d[3] = a
                }
                j += 4
                i += 4
            }
        }

    case *image.RGBA64:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1*8
            for x := x1; x < x2; x++ {
                s := img.Pix[i : i+8 : i+8]
                d := dst[j : j+4 : j+4]
                a := s[6]
                switch a {
                case 0:
                    d[0] = 0
                    d[1] = 0
                    d[2] = 0
                case 0xff:
                    d[0] = s[0]
                    d[1] = s[2]
                    d[2] = s[4]
                default:
                    r32 := uint32(s[0])<<8 | uint32(s[1])
                    g32 := uint32(s[2])<<8 | uint32(s[3])
                    b32 := uint32(s[4])<<8 | uint32(s[5])
                    a32 := uint32(s[6])<<8 | uint32(s[7])
                    d[0] = uint8((r32 * 0xffff / a32) >> 8)
                    d[1] = uint8((g32 * 0xffff / a32) >> 8)
                    d[2] = uint8((b32 * 0xffff / a32) >> 8)
                }
                d[3] = a
                j += 4
                i += 8
            }
        }

    case *image.Gray:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1
            for x := x1; x < x2; x++ {
                c := img.Pix[i]
                d := dst[j : j+4 : j+4]
                d[0] = c
                d[1] = c
                d[2] = c
                d[3] = 0xff
                j += 4
                i++
            }
        }

    case *image.Gray16:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1*2
            for x := x1; x < x2; x++ {
                c := img.Pix[i]
                d := dst[j : j+4 : j+4]
                d[0] = c
                d[1] = c
                d[2] = c
                d[3] = 0xff
                j += 4
                i += 2
            }
        }

    case *image.YCbCr:
        j := 0
        x1 += img.Rect.Min.X
        x2 += img.Rect.Min.X
        y1 += img.Rect.Min.Y
        y2 += img.Rect.Min.Y

        hy := img.Rect.Min.Y / 2
        hx := img.Rect.Min.X / 2
        for y := y1; y < y2; y++ {
            iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)

            var yBase int
            switch img.SubsampleRatio {
            case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
                yBase = (y - img.Rect.Min.Y) * img.CStride
            case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
                yBase = (y/2 - hy) * img.CStride
            }

            for x := x1; x < x2; x++ {
                var ic int
                switch img.SubsampleRatio {
                case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
                    ic = yBase + (x - img.Rect.Min.X)
                case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
                    ic = yBase + (x/2 - hx)
                default:
                    ic = img.COffset(x, y)
                }

                yy1 := int32(img.Y[iy]) * 0x10101
                cb1 := int32(img.Cb[ic]) - 128
                cr1 := int32(img.Cr[ic]) - 128

                r := yy1 + 91881*cr1
                if uint32(r)&0xff000000 == 0 {
                    r >>= 16
                } else {
                    r = ^(r >> 31)
                }

                g := yy1 - 22554*cb1 - 46802*cr1
                if uint32(g)&0xff000000 == 0 {
                    g >>= 16
                } else {
                    g = ^(g >> 31)
                }

                b := yy1 + 116130*cb1
                if uint32(b)&0xff000000 == 0 {
                    b >>= 16
                } else {
                    b = ^(b >> 31)
                }

                d := dst[j : j+4 : j+4]
                d[0] = uint8(r)
                d[1] = uint8(g)
                d[2] = uint8(b)
                d[3] = 0xff

                iy++
                j += 4
            }
        }

    case *image.Paletted:
        j := 0
        for y := y1; y < y2; y++ {
            i := y*img.Stride + x1
            for x := x1; x < x2; x++ {
                c := s.palette[img.Pix[i]]
                d := dst[j : j+4 : j+4]
                d[0] = c.R
                d[1] = c.G
                d[2] = c.B
                d[3] = c.A
                j += 4
                i++
            }
        }

    default:
        j := 0
        b := s.image.Bounds()
        x1 += b.Min.X
        x2 += b.Min.X
        y1 += b.Min.Y
        y2 += b.Min.Y
        for y := y1; y < y2; y++ {
            for x := x1; x < x2; x++ {
                r16, g16, b16, a16 := s.image.At(x, y).RGBA()
                d := dst[j : j+4 : j+4]
                switch a16 {
                case 0xffff:
                    d[0] = uint8(r16 >> 8)
                    d[1] = uint8(g16 >> 8)
                    d[2] = uint8(b16 >> 8)
                    d[3] = 0xff
                case 0:
                    d[0] = 0
                    d[1] = 0
                    d[2] = 0
                    d[3] = 0
                default:
                    d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
                    d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
                    d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
                    d[3] = uint8(a16 >> 8)
                }
                j += 4
            }
        }
    }
}
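The *image.RGBA and *image.RGBA64 branches of scan above undo alpha premultiplication while converting to NRGBA. A self-contained sketch of that arithmetic for one 8-bit pixel; the sample values are made up for illustration:

package main

import "fmt"

func main() {
    // A premultiplied pixel: the red channel has already been scaled by alpha/255.
    var r, a uint16 = 64, 128

    // Recover the straight (non-premultiplied) value, mirroring
    // scan's d[0] = uint8(r16 * 0xff / a16) above.
    straight := uint8(r * 0xff / a)
    fmt.Println(straight) // prints 127
}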
249
vendor/github.com/disintegration/imaging/tools.go
generated
vendored
Normal file
249
vendor/github.com/disintegration/imaging/tools.go
generated
vendored
Normal file
|
|
@ -0,0 +1,249 @@
|
|||
package imaging

import (
    "bytes"
    "image"
    "image/color"
    "math"
)

// New creates a new image with the specified width and height, and fills it with the specified color.
func New(width, height int, fillColor color.Color) *image.NRGBA {
    if width <= 0 || height <= 0 {
        return &image.NRGBA{}
    }

    c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
    if (c == color.NRGBA{0, 0, 0, 0}) {
        return image.NewNRGBA(image.Rect(0, 0, width, height))
    }

    return &image.NRGBA{
        Pix:    bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height),
        Stride: 4 * width,
        Rect:   image.Rect(0, 0, width, height),
    }
}

// Clone returns a copy of the given image.
func Clone(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
    size := src.w * 4
    parallel(0, src.h, func(ys <-chan int) {
        for y := range ys {
            i := y * dst.Stride
            src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
        }
    })
    return dst
}

// Anchor is the anchor point for image alignment.
type Anchor int

// Anchor point positions.
const (
    Center Anchor = iota
    TopLeft
    Top
    TopRight
    Left
    Right
    BottomLeft
    Bottom
    BottomRight
)

func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point {
    var x, y int
    switch anchor {
    case TopLeft:
        x = b.Min.X
        y = b.Min.Y
    case Top:
        x = b.Min.X + (b.Dx()-w)/2
        y = b.Min.Y
    case TopRight:
        x = b.Max.X - w
        y = b.Min.Y
    case Left:
        x = b.Min.X
        y = b.Min.Y + (b.Dy()-h)/2
    case Right:
        x = b.Max.X - w
        y = b.Min.Y + (b.Dy()-h)/2
    case BottomLeft:
        x = b.Min.X
        y = b.Max.Y - h
    case Bottom:
        x = b.Min.X + (b.Dx()-w)/2
        y = b.Max.Y - h
    case BottomRight:
        x = b.Max.X - w
        y = b.Max.Y - h
    default:
        x = b.Min.X + (b.Dx()-w)/2
        y = b.Min.Y + (b.Dy()-h)/2
    }
    return image.Pt(x, y)
}

// Crop cuts out a rectangular region with the specified bounds
// from the image and returns the cropped image.
func Crop(img image.Image, rect image.Rectangle) *image.NRGBA {
    r := rect.Intersect(img.Bounds()).Sub(img.Bounds().Min)
    if r.Empty() {
        return &image.NRGBA{}
    }
    src := newScanner(img)
    dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy()))
    rowSize := r.Dx() * 4
    parallel(r.Min.Y, r.Max.Y, func(ys <-chan int) {
        for y := range ys {
            i := (y - r.Min.Y) * dst.Stride
            src.scan(r.Min.X, y, r.Max.X, y+1, dst.Pix[i:i+rowSize])
        }
    })
    return dst
}

// CropAnchor cuts out a rectangular region with the specified size
// from the image using the specified anchor point and returns the cropped image.
func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA {
    srcBounds := img.Bounds()
    pt := anchorPt(srcBounds, width, height, anchor)
    r := image.Rect(0, 0, width, height).Add(pt)
    b := srcBounds.Intersect(r)
    return Crop(img, b)
}

// CropCenter cuts out a rectangular region with the specified size
// from the center of the image and returns the cropped image.
func CropCenter(img image.Image, width, height int) *image.NRGBA {
    return CropAnchor(img, width, height, Center)
}

// Paste pastes the img image to the background image at the specified position and returns the combined image.
func Paste(background, img image.Image, pos image.Point) *image.NRGBA {
    dst := Clone(background)
    pos = pos.Sub(background.Bounds().Min)
    pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
    interRect := pasteRect.Intersect(dst.Bounds())
    if interRect.Empty() {
        return dst
    }
    src := newScanner(img)
    parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
        for y := range ys {
            x1 := interRect.Min.X - pasteRect.Min.X
            x2 := interRect.Max.X - pasteRect.Min.X
            y1 := y - pasteRect.Min.Y
            y2 := y1 + 1
            i1 := y*dst.Stride + interRect.Min.X*4
            i2 := i1 + interRect.Dx()*4
            src.scan(x1, y1, x2, y2, dst.Pix[i1:i2])
        }
    })
    return dst
}

// PasteCenter pastes the img image to the center of the background image and returns the combined image.
func PasteCenter(background, img image.Image) *image.NRGBA {
    bgBounds := background.Bounds()
    bgW := bgBounds.Dx()
    bgH := bgBounds.Dy()
    bgMinX := bgBounds.Min.X
    bgMinY := bgBounds.Min.Y

    centerX := bgMinX + bgW/2
    centerY := bgMinY + bgH/2

    x0 := centerX - img.Bounds().Dx()/2
    y0 := centerY - img.Bounds().Dy()/2

    return Paste(background, img, image.Pt(x0, y0))
}

// Overlay draws the img image over the background image at the given position
// and returns the combined image. The opacity parameter is the opacity of the
// img image layer used to compose the images; it must be in the range 0.0 to 1.0.
//
// Examples:
//
//    // Draw spriteImage over backgroundImage at the given position (x=50, y=50).
//    dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0)
//
//    // Blend two opaque images of the same size.
//    dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
//
func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
    opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
    dst := Clone(background)
    pos = pos.Sub(background.Bounds().Min)
    pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
    interRect := pasteRect.Intersect(dst.Bounds())
    if interRect.Empty() {
        return dst
    }
    src := newScanner(img)
    parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
        scanLine := make([]uint8, interRect.Dx()*4)
        for y := range ys {
            x1 := interRect.Min.X - pasteRect.Min.X
            x2 := interRect.Max.X - pasteRect.Min.X
            y1 := y - pasteRect.Min.Y
            y2 := y1 + 1
            src.scan(x1, y1, x2, y2, scanLine)
            i := y*dst.Stride + interRect.Min.X*4
            j := 0
            for x := interRect.Min.X; x < interRect.Max.X; x++ {
                d := dst.Pix[i : i+4 : i+4]
                r1 := float64(d[0])
                g1 := float64(d[1])
                b1 := float64(d[2])
                a1 := float64(d[3])

                s := scanLine[j : j+4 : j+4]
                r2 := float64(s[0])
                g2 := float64(s[1])
                b2 := float64(s[2])
                a2 := float64(s[3])

                coef2 := opacity * a2 / 255
                coef1 := (1 - coef2) * a1 / 255
                coefSum := coef1 + coef2
                coef1 /= coefSum
                coef2 /= coefSum

                d[0] = uint8(r1*coef1 + r2*coef2)
                d[1] = uint8(g1*coef1 + g2*coef2)
                d[2] = uint8(b1*coef1 + b2*coef2)
                d[3] = uint8(math.Min(a1+a2*opacity*(255-a1)/255, 255))

                i += 4
                j += 4
            }
        }
    })
    return dst
}

// OverlayCenter overlays the img image to the center of the background image and
// returns the combined image. The opacity parameter is the opacity of the img
// image layer used to compose the images; it must be in the range 0.0 to 1.0.
func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA {
    bgBounds := background.Bounds()
    bgW := bgBounds.Dx()
    bgH := bgBounds.Dy()
    bgMinX := bgBounds.Min.X
    bgMinY := bgBounds.Min.Y

    centerX := bgMinX + bgW/2
    centerY := bgMinY + bgH/2

    x0 := centerX - img.Bounds().Dx()/2
    y0 := centerY - img.Bounds().Dy()/2

    return Overlay(background, img, image.Point{x0, y0}, opacity)
}
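A hedged usage sketch for the compositing helpers above; the sizes, colors, and position are hypothetical:

package main

import (
    "image"
    "image/color"

    "github.com/disintegration/imaging"
)

func main() {
    // A white background and a small opaque black mark, built with New from this file.
    bg := imaging.New(200, 100, color.NRGBA{R: 255, G: 255, B: 255, A: 255})
    mark := imaging.New(40, 40, color.NRGBA{A: 255})

    // Blend the mark over the background at 50% opacity.
    out := imaging.Overlay(bg, mark, image.Pt(150, 50), 0.5)
    _ = out
}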
268
vendor/github.com/disintegration/imaging/transform.go
generated
vendored
Normal file
268
vendor/github.com/disintegration/imaging/transform.go
generated
vendored
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
package imaging

import (
    "image"
    "image/color"
    "math"
)

// FlipH flips the image horizontally (from left to right) and returns the transformed image.
func FlipH(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.w
    dstH := src.h
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcY := dstY
            src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
            reverse(dst.Pix[i : i+rowSize])
        }
    })
    return dst
}

// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
func FlipV(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.w
    dstH := src.h
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcY := dstH - dstY - 1
            src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
        }
    })
    return dst
}

// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
func Transpose(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.h
    dstH := src.w
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcX := dstY
            src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
        }
    })
    return dst
}

// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
func Transverse(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.h
    dstH := src.w
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcX := dstH - dstY - 1
            src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
            reverse(dst.Pix[i : i+rowSize])
        }
    })
    return dst
}

// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
func Rotate90(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.h
    dstH := src.w
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcX := dstH - dstY - 1
            src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
        }
    })
    return dst
}

// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
func Rotate180(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.w
    dstH := src.h
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcY := dstH - dstY - 1
            src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
            reverse(dst.Pix[i : i+rowSize])
        }
    })
    return dst
}

// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
func Rotate270(img image.Image) *image.NRGBA {
    src := newScanner(img)
    dstW := src.h
    dstH := src.w
    rowSize := dstW * 4
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            i := dstY * dst.Stride
            srcX := dstY
            src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
            reverse(dst.Pix[i : i+rowSize])
        }
    })
    return dst
}

// Rotate rotates an image by the given angle counter-clockwise.
// The angle parameter is the rotation angle in degrees.
// The bgColor parameter specifies the color of the uncovered zone after the rotation.
func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
    angle = angle - math.Floor(angle/360)*360

    switch angle {
    case 0:
        return Clone(img)
    case 90:
        return Rotate90(img)
    case 180:
        return Rotate180(img)
    case 270:
        return Rotate270(img)
    }

    src := toNRGBA(img)
    srcW := src.Bounds().Max.X
    srcH := src.Bounds().Max.Y
    dstW, dstH := rotatedSize(srcW, srcH, angle)
    dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))

    if dstW <= 0 || dstH <= 0 {
        return dst
    }

    srcXOff := float64(srcW)/2 - 0.5
    srcYOff := float64(srcH)/2 - 0.5
    dstXOff := float64(dstW)/2 - 0.5
    dstYOff := float64(dstH)/2 - 0.5

    bgColorNRGBA := color.NRGBAModel.Convert(bgColor).(color.NRGBA)
    sin, cos := math.Sincos(math.Pi * angle / 180)

    parallel(0, dstH, func(ys <-chan int) {
        for dstY := range ys {
            for dstX := 0; dstX < dstW; dstX++ {
                xf, yf := rotatePoint(float64(dstX)-dstXOff, float64(dstY)-dstYOff, sin, cos)
                xf, yf = xf+srcXOff, yf+srcYOff
                interpolatePoint(dst, dstX, dstY, src, xf, yf, bgColorNRGBA)
            }
        }
    })

    return dst
}

func rotatePoint(x, y, sin, cos float64) (float64, float64) {
    return x*cos - y*sin, x*sin + y*cos
}

func rotatedSize(w, h int, angle float64) (int, int) {
    if w <= 0 || h <= 0 {
        return 0, 0
    }

    sin, cos := math.Sincos(math.Pi * angle / 180)
    x1, y1 := rotatePoint(float64(w-1), 0, sin, cos)
    x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos)
    x3, y3 := rotatePoint(0, float64(h-1), sin, cos)

    minx := math.Min(x1, math.Min(x2, math.Min(x3, 0)))
    maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0)))
    miny := math.Min(y1, math.Min(y2, math.Min(y3, 0)))
    maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0)))

    neww := maxx - minx + 1
    if neww-math.Floor(neww) > 0.1 {
        neww++
    }
    newh := maxy - miny + 1
    if newh-math.Floor(newh) > 0.1 {
        newh++
    }

    return int(neww), int(newh)
}

func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) {
    j := dstY*dst.Stride + dstX*4
    d := dst.Pix[j : j+4 : j+4]

    x0 := int(math.Floor(xf))
    y0 := int(math.Floor(yf))
    bounds := src.Bounds()
    if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) {
        d[0] = bgColor.R
        d[1] = bgColor.G
        d[2] = bgColor.B
        d[3] = bgColor.A
        return
    }

    xq := xf - float64(x0)
    yq := yf - float64(y0)
    points := [4]image.Point{
        {x0, y0},
        {x0 + 1, y0},
        {x0, y0 + 1},
        {x0 + 1, y0 + 1},
    }
    weights := [4]float64{
        (1 - xq) * (1 - yq),
        xq * (1 - yq),
        (1 - xq) * yq,
        xq * yq,
    }

    var r, g, b, a float64
    for i := 0; i < 4; i++ {
        p := points[i]
        w := weights[i]
        if p.In(bounds) {
            i := p.Y*src.Stride + p.X*4
            s := src.Pix[i : i+4 : i+4]
            wa := float64(s[3]) * w
            r += float64(s[0]) * wa
            g += float64(s[1]) * wa
            b += float64(s[2]) * wa
            a += wa
        } else {
            wa := float64(bgColor.A) * w
            r += float64(bgColor.R) * wa
            g += float64(bgColor.G) * wa
            b += float64(bgColor.B) * wa
            a += wa
        }
    }
    if a != 0 {
        aInv := 1 / a
        d[0] = clamp(r * aInv)
        d[1] = clamp(g * aInv)
        d[2] = clamp(b * aInv)
        d[3] = clamp(a)
    }
}
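A small sketch of Rotate for a non-right angle (the angle and colors here are arbitrary); multiples of 90 take the exact fast paths above, while everything else goes through the bilinear interpolatePoint:

package main

import (
    "image/color"

    "github.com/disintegration/imaging"
)

func main() {
    src := imaging.New(100, 50, color.NRGBA{R: 200, A: 255})

    // Rotate 45 degrees counter-clockwise, filling the uncovered
    // corners with transparent pixels.
    rot := imaging.Rotate(src, 45, color.NRGBA{})
    _ = rot
}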
167
vendor/github.com/disintegration/imaging/utils.go
generated
vendored
Normal file
167
vendor/github.com/disintegration/imaging/utils.go
generated
vendored
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
package imaging

import (
    "image"
    "math"
    "runtime"
    "sync"
)

// parallel processes the data in separate goroutines.
func parallel(start, stop int, fn func(<-chan int)) {
    count := stop - start
    if count < 1 {
        return
    }

    procs := runtime.GOMAXPROCS(0)
    if procs > count {
        procs = count
    }

    c := make(chan int, count)
    for i := start; i < stop; i++ {
        c <- i
    }
    close(c)

    var wg sync.WaitGroup
    for i := 0; i < procs; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fn(c)
        }()
    }
    wg.Wait()
}

// absint returns the absolute value of i.
func absint(i int) int {
    if i < 0 {
        return -i
    }
    return i
}

// clamp rounds and clamps float64 value to fit into uint8.
func clamp(x float64) uint8 {
    v := int64(x + 0.5)
    if v > 255 {
        return 255
    }
    if v > 0 {
        return uint8(v)
    }
    return 0
}

func reverse(pix []uint8) {
    if len(pix) <= 4 {
        return
    }
    i := 0
    j := len(pix) - 4
    for i < j {
        pi := pix[i : i+4 : i+4]
        pj := pix[j : j+4 : j+4]
        pi[0], pj[0] = pj[0], pi[0]
        pi[1], pj[1] = pj[1], pi[1]
        pi[2], pj[2] = pj[2], pi[2]
        pi[3], pj[3] = pj[3], pi[3]
        i += 4
        j -= 4
    }
}

func toNRGBA(img image.Image) *image.NRGBA {
    if img, ok := img.(*image.NRGBA); ok {
        return &image.NRGBA{
            Pix:    img.Pix,
            Stride: img.Stride,
            Rect:   img.Rect.Sub(img.Rect.Min),
        }
    }
    return Clone(img)
}

// rgbToHSL converts a color from RGB to HSL.
func rgbToHSL(r, g, b uint8) (float64, float64, float64) {
    rr := float64(r) / 255
    gg := float64(g) / 255
    bb := float64(b) / 255

    max := math.Max(rr, math.Max(gg, bb))
    min := math.Min(rr, math.Min(gg, bb))

    l := (max + min) / 2

    if max == min {
        return 0, 0, l
    }

    var h, s float64
    d := max - min
    if l > 0.5 {
        s = d / (2 - max - min)
    } else {
        s = d / (max + min)
    }

    switch max {
    case rr:
        h = (gg - bb) / d
        if g < b {
            h += 6
        }
    case gg:
        h = (bb-rr)/d + 2
    case bb:
        h = (rr-gg)/d + 4
    }
    h /= 6

    return h, s, l
}

// hslToRGB converts a color from HSL to RGB.
func hslToRGB(h, s, l float64) (uint8, uint8, uint8) {
    var r, g, b float64
    if s == 0 {
        v := clamp(l * 255)
        return v, v, v
    }

    var q float64
    if l < 0.5 {
        q = l * (1 + s)
    } else {
        q = l + s - l*s
    }
    p := 2*l - q

    r = hueToRGB(p, q, h+1/3.0)
    g = hueToRGB(p, q, h)
    b = hueToRGB(p, q, h-1/3.0)

    return clamp(r * 255), clamp(g * 255), clamp(b * 255)
}

func hueToRGB(p, q, t float64) float64 {
    if t < 0 {
        t++
    }
    if t > 1 {
        t--
    }
    if t < 1/6.0 {
        return p + (q-p)*6*t
    }
    if t < 1/2.0 {
        return q
    }
    if t < 2/3.0 {
        return p + (q-p)*(2/3.0-t)*6
    }
    return p
}
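The parallel helper above is a plain worker pool over row indices. The same pattern, stripped to its essentials as a standalone sketch:

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    // Queue all row indices up front, then let GOMAXPROCS workers drain them.
    rows := make(chan int, 8)
    for y := 0; y < 8; y++ {
        rows <- y
    }
    close(rows)

    var wg sync.WaitGroup
    for i := 0; i < runtime.GOMAXPROCS(0); i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for y := range rows {
                fmt.Println("processing row", y)
            }
        }()
    }
    wg.Wait()
}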
41
vendor/github.com/google/uuid/CHANGELOG.md
generated
vendored
Normal file
41
vendor/github.com/google/uuid/CHANGELOG.md
generated
vendored
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# Changelog

## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)


### Features

* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))


### Bug Fixes

* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))

## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)


### Features

* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))

## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)


### Features

* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))

### Fixes

* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)

## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)


### Bug Fixes

* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))

## Changelog
26
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
26
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# How to contribute

We definitely welcome patches and contribution to this project!

### Tips

Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).

Always try to include a test case! If it is not possible or not necessary,
please explain why in the pull request description.

### Releasing

Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.

For tips on how to work with `release-please`, see its documentation.

### Legal requirements

In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).

You may have already signed it for other Google projects.
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009,2014 Google Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
21
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
# uuid
The uuid package generates and inspects UUIDs based on
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.

This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).

###### Install
```sh
go get github.com/google/uuid
```

###### Documentation
[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)

Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://pkg.go.dev/github.com/google/uuid
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "encoding/binary"
    "fmt"
    "os"
)

// A Domain represents a Version 2 domain
type Domain byte

// Domain constants for DCE Security (Version 2) UUIDs.
const (
    Person = Domain(0)
    Group  = Domain(1)
    Org    = Domain(2)
)

// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
    uuid, err := NewUUID()
    if err == nil {
        uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
        uuid[9] = byte(domain)
        binary.BigEndian.PutUint32(uuid[0:], id)
    }
    return uuid, err
}

// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
//    NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
    return NewDCESecurity(Person, uint32(os.Getuid()))
}

// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
//    NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
    return NewDCESecurity(Group, uint32(os.Getgid()))
}

// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
    return Domain(uuid[9])
}

// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
    return binary.BigEndian.Uint32(uuid[0:4])
}

func (d Domain) String() string {
    switch d {
    case Person:
        return "Person"
    case Group:
        return "Group"
    case Org:
        return "Org"
    }
    return fmt.Sprintf("Domain%d", int(d))
}
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid
59
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
59
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "crypto/md5"
    "crypto/sha1"
    "hash"
)

// Well known namespace IDs and UUIDs
var (
    NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
    NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
    NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
    NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
    Nil           UUID // empty UUID, all zeros

    // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
    Max = UUID{
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    }
)

// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
    h.Reset()
    h.Write(space[:]) //nolint:errcheck
    h.Write(data)     //nolint:errcheck
    s := h.Sum(nil)
    var uuid UUID
    copy(uuid[:], s)
    uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
    uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
    return uuid
}

// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
//    NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
    return NewHash(md5.New(), space, data, 3)
}

// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
//    NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
    return NewHash(sha1.New(), space, data, 5)
}
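Name-based UUIDs from NewSHA1 above are deterministic: the same namespace and name always yield the same ID. A short sketch (the URL is illustrative):

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    id := uuid.NewSHA1(uuid.NameSpaceURL, []byte("https://example.com/a"))
    fmt.Println(id) // identical output on every run
}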
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "fmt"

// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
    var js [36]byte
    encodeHex(js[:], uuid)
    return js[:], nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
    id, err := ParseBytes(data)
    if err != nil {
        return err
    }
    *uuid = id
    return nil
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
    return uuid[:], nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
    if len(data) != 16 {
        return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
    }
    copy(uuid[:], data)
    return nil
}
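Because UUID implements encoding.TextMarshaler above, encoding/json renders it as the canonical hyphenated string with no extra code. A round-trip sketch (the struct and field names are made up):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/google/uuid"
)

type record struct {
    ID uuid.UUID `json:"id"`
}

func main() {
    in := record{ID: uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.org"))}

    b, err := json.Marshal(in)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"id":"<hyphenated uuid>"}

    var out record
    if err := json.Unmarshal(b, &out); err != nil {
        panic(err)
    }
    fmt.Println(out.ID == in.ID) // true
}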
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
    "sync"
)

var (
    nodeMu sync.Mutex
    ifname string  // name of interface being used
    nodeID [6]byte // hardware for version 1 UUIDs
    zeroID [6]byte // nodeID with only 0's
)

// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
    defer nodeMu.Unlock()
    nodeMu.Lock()
    return ifname
}

// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
    defer nodeMu.Unlock()
    nodeMu.Lock()
    return setNodeInterface(name)
}

func setNodeInterface(name string) bool {
    iname, addr := getHardwareInterface(name) // null implementation for js
    if iname != "" && addr != nil {
        ifname = iname
        copy(nodeID[:], addr)
        return true
    }

    // We found no interfaces with a valid hardware address. If name
    // does not specify a specific interface generate a random Node ID
    // (section 4.1.6)
    if name == "" {
        ifname = "random"
        randomBits(nodeID[:])
        return true
    }
    return false
}

// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
    defer nodeMu.Unlock()
    nodeMu.Lock()
    if nodeID == zeroID {
        setNodeInterface("")
    }
    nid := nodeID
    return nid[:]
}

// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
    if len(id) < 6 {
        return false
    }
    defer nodeMu.Unlock()
    nodeMu.Lock()
    copy(nodeID[:], id)
    ifname = "user"
    return true
}

// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
    var node [6]byte
    copy(node[:], uuid[10:])
    return node[:]
}
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build js

package uuid

// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !js

package uuid

import "net"

var interfaces []net.Interface // cached list of interfaces

// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
	if interfaces == nil {
		var err error
		interfaces, err = net.Interfaces()
		if err != nil {
			return "", nil
		}
	}
	for _, ifs := range interfaces {
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			return ifs.Name, ifs.HardwareAddr
		}
	}
	return "", nil
}
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"bytes"
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

var jsonNull = []byte("null")

// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//	var u uuid.NullUUID
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//	...
//	if u.Valid {
//		// use u.UUID
//	} else {
//		// NULL value
//	}
type NullUUID struct {
	UUID  UUID
	Valid bool // Valid is true if UUID is not NULL
}

// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
	if value == nil {
		nu.UUID, nu.Valid = Nil, false
		return nil
	}

	err := nu.UUID.Scan(value)
	if err != nil {
		nu.Valid = false
		return err
	}

	nu.Valid = true
	return nil
}

// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
	if !nu.Valid {
		return nil, nil
	}
	// Delegate to UUID Value function
	return nu.UUID.Value()
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
	if nu.Valid {
		return nu.UUID[:], nil
	}

	return []byte(nil), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
	if len(data) != 16 {
		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
	}
	copy(nu.UUID[:], data)
	nu.Valid = true
	return nil
}

// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
	if nu.Valid {
		return nu.UUID.MarshalText()
	}

	return jsonNull, nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err != nil {
		nu.Valid = false
		return err
	}
	nu.UUID = id
	nu.Valid = true
	return nil
}

// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
	if nu.Valid {
		return json.Marshal(nu.UUID)
	}

	return jsonNull, nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, jsonNull) {
		*nu = NullUUID{}
		return nil // valid null UUID
	}
	err := json.Unmarshal(data, &nu.UUID)
	nu.Valid = err == nil
	return err
}
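
For orientation, a minimal sketch (not part of this commit) of how the NullUUID type above behaves under JSON marshaling, assuming the vendored package is imported by its canonical path github.com/google/uuid:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// An invalid (NULL) NullUUID marshals to the JSON literal null...
	var nu uuid.NullUUID
	b, _ := json.Marshal(nu)
	fmt.Println(string(b)) // null

	// ...while a valid one marshals to the quoted UUID string.
	nu = uuid.NullUUID{UUID: uuid.New(), Valid: true}
	b, _ = json.Marshal(nu)
	fmt.Println(string(b))
}
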
59
vendor/github.com/google/uuid/sql.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"database/sql/driver"
	"fmt"
)

// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
	switch src := src.(type) {
	case nil:
		return nil

	case string:
		// if an empty UUID comes from a table, we return a null UUID
		if src == "" {
			return nil
		}

		// see Parse for required string format
		u, err := Parse(src)
		if err != nil {
			return fmt.Errorf("Scan: %v", err)
		}

		*uuid = u

	case []byte:
		// if an empty UUID comes from a table, we return a null UUID
		if len(src) == 0 {
			return nil
		}

		// assumes a simple slice of bytes if 16 bytes
		// otherwise attempts to parse
		if len(src) != 16 {
			return uuid.Scan(string(src))
		}
		copy((*uuid)[:], src)

	default:
		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
	}

	return nil
}

// Value implements driver.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
	return uuid.String(), nil
}
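
A minimal sketch of the Scan/Value round trip defined above, exercised directly rather than through a live database (the table and column names in the doc comment are illustrative only):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Value encodes the UUID as its canonical string form; this is
	// what the database driver writes.
	id := uuid.New()
	v, err := id.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v)

	// Scan accepts string or []byte column values, including raw
	// 16-byte slices.
	var out uuid.UUID
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(out == id) // true
}
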
134
vendor/github.com/google/uuid/time.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
	"sync"
	"time"
)

// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64

const (
	lillian    = 2299160          // Julian day of 15 Oct 1582
	unix       = 2440587          // Julian day of 1 Jan 1970
	epoch      = unix - lillian   // Days between epochs
	g1582      = epoch * 86400    // seconds between epochs
	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)

var (
	timeMu   sync.Mutex
	lasttime uint64 // last time we returned
	clockSeq uint16 // clock sequence for this run

	timeNow = time.Now // for testing
)

// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
	sec = int64(t - g1582ns100)
	nsec = (sec % 10000000) * 100
	sec /= 10000000
	return sec, nsec
}

// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
	defer timeMu.Unlock()
	timeMu.Lock()
	return getTime()
}

func getTime() (Time, uint16, error) {
	t := timeNow()

	// If we don't have a clock sequence already, set one.
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	now := uint64(t.UnixNano()/100) + g1582ns100

	// If time has gone backwards with this clock sequence then we
	// increment the clock sequence
	if now <= lasttime {
		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
	}
	lasttime = now
	return Time(now), clockSeq, nil
}

// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
	defer timeMu.Unlock()
	timeMu.Lock()
	return clockSequence()
}

func clockSequence() int {
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	return int(clockSeq & 0x3fff)
}

// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
	defer timeMu.Unlock()
	timeMu.Lock()
	setClockSequence(seq)
}

func setClockSequence(seq int) {
	if seq == -1 {
		var b [2]byte
		randomBits(b[:]) // clock sequence
		seq = int(b[0])<<8 | int(b[1])
	}
	oldSeq := clockSeq
	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
	if oldSeq != clockSeq {
		lasttime = 0
	}
}

// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
	var t Time
	switch uuid.Version() {
	case 6:
		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
		t = Time(time)
	case 7:
		time := binary.BigEndian.Uint64(uuid[:8])
		t = Time((time>>16)*10000 + g1582ns100)
	default: // forward compatible
		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
		t = Time(time)
	}
	return t
}

// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
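
A minimal sketch of recovering a wall-clock time from a time-based UUID via the Time and UnixTime methods above:

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1, time-based
	if err != nil {
		panic(err)
	}
	// Time() is 100ns ticks since 15 Oct 1582; UnixTime rebases that
	// onto the Unix epoch for use with the standard time package.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())
}
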
43
vendor/github.com/google/uuid/util.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"io"
)

// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
	if _, err := io.ReadFull(rander, b); err != nil {
		panic(err.Error()) // rand should never fail
	}
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}
365
vendor/github.com/google/uuid/uuid.go
generated
vendored
Normal file
@ -0,0 +1,365 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte

// A Version represents a UUID's version.
type Version byte

// A Variant represents a UUID's variant.
type Variant byte

// Constants returned by Variant.
const (
	Invalid   = Variant(iota) // Invalid UUID
	RFC4122                   // The variant specified in RFC4122
	Reserved                  // Reserved, NCS backward compatibility.
	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
	Future                    // Reserved for future definition.
)

const randPoolSize = 16 * 16

var (
	rander      = rand.Reader // random function
	poolEnabled = false
	poolMu      sync.Mutex
	poolPos     = randPoolSize       // protected with poolMu
	pool        [randPoolSize]byte   // protected with poolMu
)

type invalidLengthError struct{ len int }

func (err invalidLengthError) Error() string {
	return fmt.Sprintf("invalid UUID length: %d", err.len)
}

// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
func IsInvalidLengthError(err error) bool {
	_, ok := err.(invalidLengthError)
	return ok
}

// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// the standard UUID forms defined in RFC 4122
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
	var uuid UUID
	switch len(s) {
	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36:

	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9:
		if !strings.EqualFold(s[:9], "urn:uuid:") {
			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]

	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
	case 36 + 2:
		s = s[1:]

	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
	case 32:
		var ok bool
		for i := range uuid {
			uuid[i], ok = xtob(s[i*2], s[i*2+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(s)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34,
	} {
		v, ok := xtob(s[x], s[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}

// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
	var uuid UUID
	switch len(b) {
	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
		if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
		}
		b = b[9:]
	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
		b = b[1:]
	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
		var ok bool
		for i := 0; i < 32; i += 2 {
			uuid[i/2], ok = xtob(b[i], b[i+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(b)}
	}
	// b is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34,
	} {
		v, ok := xtob(b[x], b[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}

// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
	uuid, err := Parse(s)
	if err != nil {
		panic(`uuid: Parse(` + s + `): ` + err.Error())
	}
	return uuid
}

// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
	err = uuid.UnmarshalBinary(b)
	return uuid, err
}

// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
	if err != nil {
		panic(err)
	}
	return uuid
}

// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
//
//	xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//	urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//	xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//	{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
//
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
	switch len(s) {
	// Standard UUID format
	case 36:

	// UUID with "urn:uuid:" prefix
	case 36 + 9:
		if !strings.EqualFold(s[:9], "urn:uuid:") {
			return fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]

	// UUID enclosed in braces
	case 36 + 2:
		if s[0] != '{' || s[len(s)-1] != '}' {
			return fmt.Errorf("invalid bracketed UUID format")
		}
		s = s[1 : len(s)-1]

	// UUID without hyphens
	case 32:
		for i := 0; i < len(s); i += 2 {
			_, ok := xtob(s[i], s[i+1])
			if !ok {
				return errors.New("invalid UUID format")
			}
		}

	default:
		return invalidLengthError{len(s)}
	}

	// Check for standard UUID format
	if len(s) == 36 {
		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
			return errors.New("invalid UUID format")
		}
		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
			if _, ok := xtob(s[x], s[x+1]); !ok {
				return errors.New("invalid UUID format")
			}
		}
	}

	return nil
}

// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
func (uuid UUID) String() string {
	var buf [36]byte
	encodeHex(buf[:], uuid)
	return string(buf[:])
}

// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
	var buf [36 + 9]byte
	copy(buf[:], "urn:uuid:")
	encodeHex(buf[9:], uuid)
	return string(buf[:])
}

func encodeHex(dst []byte, uuid UUID) {
	hex.Encode(dst, uuid[:4])
	dst[8] = '-'
	hex.Encode(dst[9:13], uuid[4:6])
	dst[13] = '-'
	hex.Encode(dst[14:18], uuid[6:8])
	dst[18] = '-'
	hex.Encode(dst[19:23], uuid[8:10])
	dst[23] = '-'
	hex.Encode(dst[24:], uuid[10:])
}

// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
	switch {
	case (uuid[8] & 0xc0) == 0x80:
		return RFC4122
	case (uuid[8] & 0xe0) == 0xc0:
		return Microsoft
	case (uuid[8] & 0xe0) == 0xe0:
		return Future
	default:
		return Reserved
	}
}

// Version returns the version of uuid.
func (uuid UUID) Version() Version {
	return Version(uuid[6] >> 4)
}

func (v Version) String() string {
	if v > 15 {
		return fmt.Sprintf("BAD_VERSION_%d", v)
	}
	return fmt.Sprintf("VERSION_%d", v)
}

func (v Variant) String() string {
	switch v {
	case RFC4122:
		return "RFC4122"
	case Reserved:
		return "Reserved"
	case Microsoft:
		return "Microsoft"
	case Future:
		return "Future"
	case Invalid:
		return "Invalid"
	}
	return fmt.Sprintf("BadVariant%d", int(v))
}

// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
	if r == nil {
		rander = rand.Reader
		return
	}
	rander = r
}

// EnableRandPool enables the internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
	poolEnabled = true
}

// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
	poolEnabled = false
	defer poolMu.Unlock()
	poolMu.Lock()
	poolPos = randPoolSize
}

// UUIDs is a slice of UUID types.
type UUIDs []UUID

// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
	var uuidStrs = make([]string, len(uuids))
	for i, uuid := range uuids {
		uuidStrs[i] = uuid.String()
	}
	return uuidStrs
}
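
A minimal sketch of the four encodings Parse accepts and of Validate, using the RFC 4122 DNS namespace UUID as sample input:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// All four accepted encodings decode to the same value.
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	}
	for _, s := range forms {
		u, err := uuid.Parse(s)
		fmt.Println(u, err)
	}

	// Validate checks the format without materializing a UUID.
	fmt.Println(uuid.Validate("not-a-uuid") != nil) // true
}
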
44
vendor/github.com/google/uuid/version1.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
)

// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If the clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now >> 48) & 0x0fff)
	timeHi |= 0x1000 // Version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq)

	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
76
vendor/github.com/google/uuid/version4.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "io"

// New creates a new random UUID or panics. New is equivalent to
// the expression
//
//	uuid.Must(uuid.NewRandom())
func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//	uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
//	hit by a meteorite is estimated to be one chance in 17 billion, that
//	means the probability is about 0.00000000006 (6 × 10^-11),
//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
//	year and having one duplicate.
func NewRandom() (UUID, error) {
	if !poolEnabled {
		return NewRandomFromReader(rander)
	}
	return newRandomFromPool()
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
	var uuid UUID
	_, err := io.ReadFull(r, uuid[:])
	if err != nil {
		return Nil, err
	}
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
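
A minimal sketch of opting into the randomness pool described above; note the doc comment's caveat that the toggle itself is not safe to call concurrently with generation:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Batch randomness reads for higher V4 throughput. Toggle only
	// while no other goroutine is generating UUIDs.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()

	for i := 0; i < 3; i++ {
		fmt.Println(uuid.NewString())
	}
}
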
56
vendor/github.com/google/uuid/version6.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "encoding/binary"

// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 sets the NodeID to random bits automatically. If the clock
// sequence has not been set by SetClockSequence then it will be set
// automatically. If GetTime fails to return the current time, NewV6 returns
// Nil and an error.
func NewV6() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	/*
	    0                   1                   2                   3
	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                           time_high                           |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |           time_mid            |      time_low_and_version     |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                         node (2-5)                            |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/

	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
	binary.BigEndian.PutUint16(uuid[8:], seq)

	uuid[6] = 0x60 | (uuid[6] & 0x0F)
	uuid[8] = 0x80 | (uuid[8] & 0x3F)

	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
104
vendor/github.com/google/uuid/version7.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"io"
)

// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source: the number of
// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. It also
// has improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
	uuid, err := NewRandom()
	if err != nil {
		return uuid, err
	}
	makeV7(uuid[:])
	return uuid, nil
}

// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
	uuid, err := NewRandomFromReader(r)
	if err != nil {
		return uuid, err
	}

	makeV7(uuid[:])
	return uuid, nil
}

// makeV7 fills in the 48 bits of time (uuid[0] - uuid[5]) and sets the version b0111 (uuid[6]).
// uuid[8] already has the right variant number (10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
	/*
	    0                   1                   2                   3
	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                           unix_ts_ms                          |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |var|                        rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                            rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	_ = uuid[15] // bounds check

	t, s := getV7Time()

	uuid[0] = byte(t >> 40)
	uuid[1] = byte(t >> 32)
	uuid[2] = byte(t >> 24)
	uuid[3] = byte(t >> 16)
	uuid[4] = byte(t >> 8)
	uuid[5] = byte(t)

	uuid[6] = 0x70 | (0x0F & byte(s>>8))
	uuid[7] = byte(s)
}

// lastV7time is the last time we returned stored as:
//
//	52 bits of time in milliseconds since epoch
//	12 bits of (fractional nanoseconds) >> 8
var lastV7time int64

const nanoPerMilli = 1000000

// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
	timeMu.Lock()
	defer timeMu.Unlock()

	nano := timeNow().UnixNano()
	milli = nano / nanoPerMilli
	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
	seq = (nano - milli*nanoPerMilli) >> 8
	now := milli<<12 + seq
	if now <= lastV7time {
		now = lastV7time + 1
		milli = now >> 12
		seq = now & 0xfff
	}
	lastV7time = now
	return milli, seq
}
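
A minimal sketch of the V7 ordering property: because getV7Time is strictly monotonic, the big-endian timestamp-plus-sequence prefix makes successive V7 UUIDs from one process sort lexically in creation order:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	b, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	// The 48-bit millisecond timestamp and 12-bit sequence occupy the
	// leading hex digits, so string order follows creation order.
	fmt.Println(a.String() < b.String()) // true
}
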
9
vendor/github.com/jackc/pgpassfile/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
language: go

go:
  - 1.x
  - tip

matrix:
  allow_failures:
    - go: tip
22
vendor/github.com/jackc/pgpassfile/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
Copyright (c) 2019 Jack Christensen

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8
vendor/github.com/jackc/pgpassfile/README.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
[](https://godoc.org/github.com/jackc/pgpassfile)
[](https://travis-ci.org/jackc/pgpassfile)

# pgpassfile

Package pgpassfile is a parser for PostgreSQL .pgpass files.

Extracted and rewritten from the original implementation in https://github.com/jackc/pgx.
110
vendor/github.com/jackc/pgpassfile/pgpass.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
// Package pgpassfile is a parser for PostgreSQL .pgpass files.
package pgpassfile

import (
	"bufio"
	"io"
	"os"
	"regexp"
	"strings"
)

// Entry represents a line in a PG passfile.
type Entry struct {
	Hostname string
	Port     string
	Database string
	Username string
	Password string
}

// Passfile is the in memory data structure representing a PG passfile.
type Passfile struct {
	Entries []*Entry
}

// ReadPassfile reads the file at path and parses it into a Passfile.
func ReadPassfile(path string) (*Passfile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ParsePassfile(f)
}

// ParsePassfile reads r and parses it into a Passfile.
func ParsePassfile(r io.Reader) (*Passfile, error) {
	passfile := &Passfile{}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		entry := parseLine(scanner.Text())
		if entry != nil {
			passfile.Entries = append(passfile.Entries, entry)
		}
	}

	return passfile, scanner.Err()
}

// Match (not colons or escaped colon or escaped backslash)+. Essentially gives a split on unescaped
// colon.
var colonSplitterRegexp = regexp.MustCompile("(([^:]|(\\:)))+")

// var colonSplitterRegexp = regexp.MustCompile("((?:[^:]|(?:\\:)|(?:\\\\))+)")

// parseLine parses a line into an *Entry. It returns nil on comment lines or any other unparsable
// line.
func parseLine(line string) *Entry {
	const (
		tmpBackslash = "\r"
		tmpColon     = "\n"
	)

	line = strings.TrimSpace(line)

	if strings.HasPrefix(line, "#") {
		return nil
	}

	line = strings.Replace(line, `\\`, tmpBackslash, -1)
	line = strings.Replace(line, `\:`, tmpColon, -1)

	parts := strings.Split(line, ":")
	if len(parts) != 5 {
		return nil
	}

	// Unescape escaped colons and backslashes
	for i := range parts {
		parts[i] = strings.Replace(parts[i], tmpBackslash, `\`, -1)
		parts[i] = strings.Replace(parts[i], tmpColon, `:`, -1)
	}

	return &Entry{
		Hostname: parts[0],
		Port:     parts[1],
		Database: parts[2],
		Username: parts[3],
		Password: parts[4],
	}
}

// FindPassword finds the password for the provided hostname, port, database, and username. For a
// Unix domain socket hostname must be set to "localhost". An empty string will be returned if no
// match is found.
//
// See https://www.postgresql.org/docs/current/libpq-pgpass.html for more password file information.
func (pf *Passfile) FindPassword(hostname, port, database, username string) (password string) {
	for _, e := range pf.Entries {
		if (e.Hostname == "*" || e.Hostname == hostname) &&
			(e.Port == "*" || e.Port == port) &&
			(e.Database == "*" || e.Database == database) &&
			(e.Username == "*" || e.Username == username) {
			return e.Password
		}
	}
	return ""
}
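
A minimal sketch of ParsePassfile and FindPassword above; the hostnames, database, user, and passwords here are made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/jackc/pgpassfile"
)

func main() {
	contents := "# comment lines are skipped\n" +
		"localhost:5432:lookbook:soup:s3cret\n" +
		"*:*:*:*:fallback\n"

	pf, err := pgpassfile.ParsePassfile(strings.NewReader(contents))
	if err != nil {
		panic(err)
	}
	// The first matching entry wins; "*" matches any value.
	fmt.Println(pf.FindPassword("localhost", "5432", "lookbook", "soup"))  // s3cret
	fmt.Println(pf.FindPassword("db.example.com", "5432", "other", "me")) // fallback
}
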
22
vendor/github.com/jackc/pgservicefile/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
Copyright (c) 2020 Jack Christensen

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
7
vendor/github.com/jackc/pgservicefile/README.md
generated
vendored
Normal file
@ -0,0 +1,7 @@
[](https://pkg.go.dev/github.com/jackc/pgservicefile)
[](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)


# pgservicefile

Package pgservicefile is a parser for PostgreSQL service files (e.g. `.pg_service.conf`).
81
vendor/github.com/jackc/pgservicefile/pgservicefile.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
// Package pgservicefile is a parser for PostgreSQL service files (e.g. .pg_service.conf).
package pgservicefile

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
)

type Service struct {
	Name     string
	Settings map[string]string
}

type Servicefile struct {
	Services       []*Service
	servicesByName map[string]*Service
}

// GetService returns the named service.
func (sf *Servicefile) GetService(name string) (*Service, error) {
	service, present := sf.servicesByName[name]
	if !present {
		return nil, errors.New("not found")
	}
	return service, nil
}

// ReadServicefile reads the file at path and parses it into a Servicefile.
func ReadServicefile(path string) (*Servicefile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ParseServicefile(f)
}

// ParseServicefile reads r and parses it into a Servicefile.
func ParseServicefile(r io.Reader) (*Servicefile, error) {
	servicefile := &Servicefile{}

	var service *Service
	scanner := bufio.NewScanner(r)
	lineNum := 0
	for scanner.Scan() {
		lineNum += 1
		line := scanner.Text()
		line = strings.TrimSpace(line)

		if line == "" || strings.HasPrefix(line, "#") {
			// ignore comments and empty lines
		} else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
			service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
			servicefile.Services = append(servicefile.Services, service)
		} else if service != nil {
			parts := strings.SplitN(line, "=", 2)
			if len(parts) != 2 {
				return nil, fmt.Errorf("unable to parse line %d", lineNum)
			}

			key := strings.TrimSpace(parts[0])
			value := strings.TrimSpace(parts[1])

			service.Settings[key] = value
		} else {
			return nil, fmt.Errorf("line %d is not in a section", lineNum)
		}
	}

	servicefile.servicesByName = make(map[string]*Service, len(servicefile.Services))
	for _, service := range servicefile.Services {
		servicefile.servicesByName[service.Name] = service
	}

	return servicefile, scanner.Err()
}
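
A minimal sketch of ParseServicefile and GetService above, fed an inline .pg_service.conf-style snippet (the service name and settings are illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/jackc/pgservicefile"
)

func main() {
	conf := "# sections name services; key=value pairs follow\n" +
		"[lookbook]\n" +
		"host=localhost\n" +
		"port=5432\n" +
		"dbname=lookbook\n"

	sf, err := pgservicefile.ParseServicefile(strings.NewReader(conf))
	if err != nil {
		panic(err)
	}
	svc, err := sf.GetService("lookbook")
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Settings["host"], svc.Settings["port"], svc.Settings["dbname"])
}
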
27
vendor/github.com/jackc/pgx/v5/.gitignore
generated
vendored
Normal file
@ -0,0 +1,27 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.envrc
/.testdb

.DS_Store
21
vendor/github.com/jackc/pgx/v5/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,21 @@
# See for configurations: https://golangci-lint.run/usage/configuration/
version: 2

# See: https://golangci-lint.run/usage/formatters/
formatters:
  default: none
  enable:
    - gofmt # https://pkg.go.dev/cmd/gofmt
    - gofumpt # https://github.com/mvdan/gofumpt

  settings:
    gofmt:
      simplify: true # Simplify code: gofmt with `-s` option.

    gofumpt:
      # Module path which contains the source code being formatted.
      # Default: ""
      module-path: github.com/jackc/pgx/v5 # Should match with module in go.mod
      # Choose whether to use the extra rules.
      # Default: false
      extra-rules: true
473
vendor/github.com/jackc/pgx/v5/CHANGELOG.md
generated
vendored
Normal file
473
vendor/github.com/jackc/pgx/v5/CHANGELOG.md
generated
vendored
Normal file
|
|
@ -0,0 +1,473 @@
|
|||
# 5.7.6 (September 8, 2025)
|
||||
|
||||
* Use ParseConfigError in pgx.ParseConfig and pgxpool.ParseConfig (Yurasov Ilia)
|
||||
* Add PrepareConn hook to pgxpool (Jonathan Hall)
|
||||
* Reduce allocations in QueryContext (Dominique Lefevre)
|
||||
* Add MarshalJSON and UnmarshalJSON for pgtype.Uint32 (Panos Koutsovasilis)
|
||||
* Configure ping behavior on pgxpool with ShouldPing (Christian Kiely)
|
||||
* zeronull int types implement Int64Valuer and Int64Scanner (Li Zeghong)
|
||||
* Fix panic when receiving terminate connection message during CopyFrom (Michal Drausowski)
|
||||
* Fix statement cache not being invalidated on error during batch (Muhammadali Nazarov)
|
||||
|
||||
# 5.7.5 (May 17, 2025)
|
||||
|
||||
* Support sslnegotiation connection option (divyam234)
|
||||
* Update golang.org/x/crypto to v0.37.0. This placates security scanners that were unable to see that pgx did not use the behavior affected by https://pkg.go.dev/vuln/GO-2025-3487.
|
||||
* TraceLog now logs Acquire and Release at the debug level (dave sinclair)
|
||||
* Add support for PGTZ environment variable
|
||||
* Add support for PGOPTIONS environment variable
|
||||
* Unpin memory used by Rows quicker
|
||||
* Remove PlanScan memoization. This resolves a rare issue where scanning could be broken for one type by first scanning another. The problem was in the memoization system and benchmarking revealed that memoization was not providing any meaningful benefit.
|
||||
|
||||
# 5.7.4 (March 24, 2025)
|
||||
|
||||
* Fix / revert change to scanning JSON `null` (Felix Röhrich)
|
||||
|
||||
# 5.7.3 (March 21, 2025)
|
||||
|
||||
* Expose EmptyAcquireWaitTime in pgxpool.Stat (vamshiaruru32)
|
||||
* Improve SQL sanitizer performance (ninedraft)
|
||||
* Fix Scan confusion with json(b), sql.Scanner, and automatic dereferencing (moukoublen, felix-roehrich)
|
||||
* Fix Values() for xml type always returning nil instead of []byte
|
||||
* Add ability to send Flush message in pipeline mode (zenkovev)
|
||||
* Fix pgtype.Timestamp's JSON behavior to match PostgreSQL (pconstantinou)
|
||||
* Better error messages when scanning structs (logicbomb)
|
||||
* Fix handling of error on batch write (bonnefoa)
|
||||
* Match libpq's connection fallback behavior more closely (felix-roehrich)
|
||||
* Add MinIdleConns to pgxpool (djahandarie)
|
||||
|
||||
# 5.7.2 (December 21, 2024)
|
||||
|
||||
* Fix prepared statement already exists on batch prepare failure
|
||||
* Add commit query to tx options (Lucas Hild)
|
||||
* Fix pgtype.Timestamp json unmarshal (Shean de Montigny-Desautels)
|
||||
* Add message body size limits in frontend and backend (zene)
|
||||
* Add xid8 type
|
||||
* Ensure planning encodes and scans cannot infinitely recurse
|
||||
* Implement pgtype.UUID.String() (Konstantin Grachev)
|
||||
* Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs functions (Alexander Rumyantsev)
|
||||
* Update golang.org/x/crypto
|
||||
* Fix json(b) columns prefer sql.Scanner interface like database/sql (Ludovico Russo)
|
||||
|
||||
# 5.7.1 (September 10, 2024)
|
||||
|
||||
* Fix data race in tracelog.TraceLog
|
||||
* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
|
||||
* Update golang.org/x/crypto and golang.org/x/text
|
||||
|
||||
# 5.7.0 (September 7, 2024)
|
||||
|
||||
* Add support for sslrootcert=system (Yann Soubeyrand)
|
||||
* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
|
||||
* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
|
||||
* Add MultiTrace (Stepan Rabotkin)
|
||||
* Add TraceLogConfig with customizable TimeKey (stringintech)
|
||||
* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
|
||||
* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
|
||||
* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
|
||||
* Update pgservicefile - fixes panic when parsing invalid file
|
||||
* Better error message when reading past end of batch
|
||||
* Don't print url when url.Parse returns an error (Kevin Biju)
|
||||
* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
|
||||
* Fix: Scan and encode types with underlying types of arrays

# 5.6.0 (May 25, 2024)

* Add StrictNamedArgs (Tomas Zahradnicek)
* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
* Add SeverityUnlocalized field to PgError / Notice
* Performance optimization of RowToStructByPos/Name (Zach Olstein)
* Allow customizing context canceled behavior for pgconn
* Add ScanLocation to pgtype.Timestamp[tz]Codec
* Add custom data to pgconn.PgConn
* Fix ResultReader.Read() to handle nil values
* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
* pgconn.SafeToRetry checks for wrapped errors (tjasko)
* Failed connection attempts include all errors
* Optimize LargeObject.Read (Mitar)
* Add tracing for connection acquire and release from pool (ngavinsir)
* Fix encode driver.Valuer not called when nil
* Add support for custom JSON marshal and unmarshal (Mitar)
* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)

# 5.5.5 (March 9, 2024)

Use spaces instead of parentheses for SQL sanitization.

This still solves the problem of negative numbers creating a line comment, but it avoids breaking edge cases such as
`set foo to $1` where the substitution takes place in a location where an arbitrary expression is not allowed.

# 5.5.4 (March 4, 2024)

Fix CVE-2024-27304.

SQL injection can occur if an attacker can cause a single query or bind message to exceed 4 GB in size. An integer
overflow in the calculated message size can cause the one large message to be sent as multiple messages under the
attacker's control.

Thanks to Paul Gerste for reporting this issue.

* Fix behavior of CollectRows to return empty slice if Rows are empty (Felix)
* Fix simple protocol encoding of json.RawMessage
* Fix *Pipeline.getResults should close pipeline on error
* Fix panic in TryFindUnderlyingTypeScanPlan (David Kurman)
* Fix deallocation of invalidated cached statements in a transaction
* Handle invalid sslkey file
* Fix scan float4 into sql.Scanner
* Fix pgtype.Bits not making a copy of data from the read buffer. This would cause the data to be corrupted by future reads.

# 5.5.3 (February 3, 2024)

* Fix: prepared statement already exists
* Improve CopyFrom auto-conversion of text-ish values
* Add ltree type support (Florent Viel)
* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
* Add AppendRows function (Edoardo Spadolini)
* Optimize convert UUID [16]byte to string (Kirill Malikov)
* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)

# 5.5.2 (January 13, 2024)

* Allow NamedArgs to start with underscore
* pgproto3: Maximum message body length support (jeremy.spriet)
* Upgrade golang.org/x/crypto to v0.17.0
* Add snake_case support to RowToStructByName (Tikhon Fedulov)
* Fix: update description cache after exec prepare (James Hartig)
* Fix: pipeline checks if it is closed (James Hartig and Ryan Fowler)
* Fix: normalize timeout / context errors during TLS startup (Samuel Stauffer)
* Add OnPgError for easier centralized error handling (James Hartig)

# 5.5.1 (December 9, 2023)

* Add CopyFromFunc helper function. (robford)
* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
* Add pgtype.Numeric.ScanScientific (Eshton Robateau)

# 5.5.0 (November 4, 2023)

* Add CollectExactlyOneRow. (Julien GOTTELAND)
* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
* Statement cache now uses deterministic, stable statement names.
* database/sql prepared statement names are deterministically generated.
* Fix: SendBatch wasn't respecting context cancellation.
* Fix: Timeout error from pipeline is now normalized.
* Fix: database/sql encoding json.RawMessage to []byte.
* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
* stdlib: Use Ping instead of CheckConn in ResetSession
* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)

# 5.4.3 (August 5, 2023)

* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
* Fix: connect_timeout for sslmode=allow|prefer (smaher-edb)
* Fix: pgxpool: background health check cannot overflow pool
* Fix: Check for nil in defer when sending batch (recover properly from panic)
* Fix: json scan of non-string pointer to pointer
* Fix: zeronull.Timestamptz should use pgtype.Timestamptz
* Fix: NewConnsCount was not correctly counting connections created by Acquire directly. (James Hartig)
* RowTo(AddrOf)StructByPos ignores fields with "-" db tag
* Optimization: improve text format numeric parsing (horpto)

# 5.4.2 (July 11, 2023)

* Fix: RowScanner errors are fatal to Rows
* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
* Hstore text codec internal improvements (Evan Jones)
* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
* Fix: Stop background reader as soon as possible.
* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.

# 5.4.1 (June 18, 2023)

* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
* Add TxOptions.BeginQuery to allow overriding the default BEGIN query

# 5.4.0 (June 14, 2023)

* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
* CancelRequest: don't try to read the reply (Nicola Murino)
* Fix: correctly handle bool type aliases (Wichert Akkerman)
* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
* Add BeforeClose to pgxpool.Pool (Evan Cordell)
* Fix: various hstore fixes and optimizations (Evan Jones)
* Fix: RowToStructByPos with embedded unexported struct
* Support different bool string representations (Lev Zakharov)
* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
* Fix: pipeline batch results not closing pipeline when an error occurs while reading directly from results instead of using a callback.
* Fix: scanning a table type into a struct
* Fix: scan array of record to pointer to slice of struct
* Fix: handle null for json (Cemre Mengu)
* Batch Query callback is called even when there is an error
* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)

# 5.3.1 (February 27, 2023)

* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)
* Fix: sql.Scanner not being used in certain cases
* Add text format jsonpath support
* Fix: fake non-blocking read adaptive wait time

# 5.3.0 (February 11, 2023)

* Fix: json values work with sql.Scanner
* Fixed / improved error messages (Mark Chambers and Yevgeny Pats)
* Fix: support scan into single dimensional arrays
* Fix: MaxConnLifetimeJitter setting actually jitters (Ben Weintraub)
* Fix: driver.Value representation of bytea should be []byte not string
* Fix: better handling of unregistered OIDs
* CopyFrom can use query cache to avoid extra round trip to get OIDs (Alejandro Do Nascimento Mora)
* Fix: encode to json ignoring driver.Valuer
* Support sql.Scanner on renamed base type
* Fix: pgtype.Numeric text encoding of negative numbers (Mark Chambers)
* Fix: connect with multiple hostnames when one can't be resolved
* Upgrade puddle to remove dependency on uber/atomic and fix alignment issue on 32-bit platform
* Fix: scanning json column into **string
* Multiple reductions in memory allocations
* Fake non-blocking read adapts its max wait time
* Improve CopyFrom performance and reduce memory usage
* Fix: encode []any to array
* Fix: LoadType for composite with dropped attributes (Felix Röhrich)
* Support v4 and v5 stdlib in same program
* Fix: text format array decoding with string of "NULL"
* Prefer binary format for arrays

# 5.2.0 (December 5, 2022)

* `tracelog.TraceLog` implements the pgx.PrepareTracer interface. (Vitalii Solodilov)
* Optimize creating begin transaction SQL string (Petr Evdokimov and ksco)
* `Conn.LoadType` supports range and multirange types (Vitalii Solodilov)
* Fix scan `uint` and `uint64` `ScanNumeric`. This resolves a PostgreSQL `numeric` being incorrectly scanned into `uint` and `uint64`.

# 5.1.1 (November 17, 2022)

* Fix simple query sanitizer where query text contains a Unicode replacement character.
* Remove erroneous `name` argument from `DeallocateAll()`. Technically, this is a breaking change, but given that the method was only added 5 days ago this change was accepted. (Bodo Kaiser)

# 5.1.0 (November 12, 2022)

* Update puddle to v2.1.2. This resolves a race condition and a deadlock in pgxpool.
* `QueryRewriter.RewriteQuery` now returns an error. Technically, this is a breaking change for any external implementers, but given the minimal likelihood that there are actually any external implementers this change was accepted.
* Expose `GetSSLPassword` support to pgx.
* Fix encode `ErrorResponse` unknown field handling. This would only affect pgproto3 being used directly as a proxy with a non-PostgreSQL server that included additional error fields.
* Fix date text format encoding with 5 digit years.
* Fix date values passed to a `sql.Scanner` as `string` instead of `time.Time`.
* DateCodec.DecodeValue can return `pgtype.InfinityModifier` instead of `string` for infinite values. This now matches the behavior of the timestamp types.
* Add domain type support to `Conn.LoadType()`.
* Add `RowToStructByName` and `RowToAddrOfStructByName`. (Pavlo Golub)
* Add `Conn.DeallocateAll()` to clear all prepared statements including the statement cache. (Bodo Kaiser)

# 5.0.4 (October 24, 2022)

* Fix: CollectOneRow prefers PostgreSQL error over pgx.ErrNoRows
* Fix: some reflect Kind checks to first check for nil
* Bump golang.org/x/text dependency to placate snyk
* Fix: RowToStructByPos on structs with multiple anonymous sub-structs (Baptiste Fontaine)
* Fix: Exec checks if tx is closed

# 5.0.3 (October 14, 2022)

* Fix `driver.Valuer` handling edge cases that could cause infinite loop or crash

# v5.0.2 (October 8, 2022)

* Fix date encoding in text format to always use 2 digits for month and day
* Prefer driver.Valuer over wrap plans when encoding
* Fix scan to pointer to pointer to renamed type
* Allow scanning NULL even if PG and Go types are incompatible

# v5.0.1 (September 24, 2022)

* Fix 32-bit atomic usage
* Add MarshalJSON for Float8 (yogipristiawan)
* Add `[` and `]` to text encoding of `Lseg`
* Fix sqlScannerWrapper NULL handling

# v5.0.0 (September 17, 2022)

## Merged Packages

`github.com/jackc/pgtype`, `github.com/jackc/pgconn`, and `github.com/jackc/pgproto3` are now included in the main
`github.com/jackc/pgx` repository. Previously there was confusion as to where issues should be reported, additional
release work due to releasing multiple packages, and less clear changelogs.

## pgconn

`CommandTag` is now an opaque type instead of directly exposing an underlying `[]byte`.

The return value of `ResultReader.Values()` is no longer safe to retain a reference to after a subsequent call to `NextRow()` or `Close()`.

The `Trace()` method adds low level message tracing similar to the `PQtrace` function in `libpq`.

pgconn now uses non-blocking IO. This is a significant internal restructuring, but it should not cause any visible changes on its own. However, it is important in implementing other new features.

`CheckConn()` checks a connection's liveness by doing a non-blocking read. This can be used to detect database restarts or network interruptions without executing a query or a ping.

pgconn now supports pipeline mode.

`*PgConn.ReceiveResults` removed. Use pipeline mode instead.

`Timeout()` no longer considers `context.Canceled` a timeout error. `context.DeadlineExceeded` is still considered a timeout error.

## pgxpool

`Connect` and `ConnectConfig` have been renamed to `New` and `NewWithConfig` respectively. The `LazyConnect` option has been removed. Pools always lazily connect.

## pgtype

The `pgtype` package has been significantly changed.

### NULL Representation

Previously, types had a `Status` field that could be `Undefined`, `Null`, or `Present`. This has been changed to a
`Valid` `bool` field to harmonize with how `database/sql` represents `NULL` and to make the zero value usable.

Previously, a type that implemented `driver.Valuer` would have the `Value` method called even on a nil pointer. All nils,
whether typed or untyped, now represent `NULL`.
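
To illustrate, a minimal sketch of the new zero-value semantics (the values here are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// The zero value is NULL: Valid defaults to false.
	var n pgtype.Int8
	fmt.Println(n.Valid) // false

	// A present value sets Valid explicitly.
	n = pgtype.Int8{Int64: 42, Valid: true}
	fmt.Println(n.Int64, n.Valid) // 42 true
}
```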

### Codec and Value Split

Previously, the type system combined decoding and encoding values with the value types. e.g. Type `Int8` both handled
encoding and decoding the PostgreSQL representation and acted as a value object. This caused some difficulties when
there was not an exact 1 to 1 relationship between the Go types and the PostgreSQL types. For example, scanning a
PostgreSQL binary `numeric` into a Go `float64` was awkward (see https://github.com/jackc/pgtype/issues/147). These
concepts have been separated. A `Codec` only has responsibility for encoding and decoding values. Value types are
generally defined by implementing an interface that a particular `Codec` understands (e.g. `PointScanner` and
`PointValuer` for the PostgreSQL `point` type).

### Array Types

All array types are now handled by `ArrayCodec` instead of using code generation for each new array type. This also
means that less common array types such as `point[]` are now supported. `Array[T]` supports PostgreSQL multi-dimensional
arrays.

### Composite Types

Composite types must be registered before use. `CompositeFields` may still be used to construct and destruct composite
values, but any type may now implement `CompositeIndexGetter` and `CompositeIndexScanner` to be used as a composite.

### Range Types

Range types are now handled with the types `RangeCodec` and `Range[T]`. This allows additional user-defined range types to
easily be handled. Multirange types are handled similarly with `MultirangeCodec` and `Multirange[T]`.
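
For instance, a hedged sketch of constructing a value with the generic `Range[T]` type; the specific bounds are illustrative:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// A half-open range [1,10), like PostgreSQL's int4range(1, 10).
	r := pgtype.Range[pgtype.Int4]{
		Lower:     pgtype.Int4{Int32: 1, Valid: true},
		Upper:     pgtype.Int4{Int32: 10, Valid: true},
		LowerType: pgtype.Inclusive,
		UpperType: pgtype.Exclusive,
		Valid:     true,
	}
	fmt.Println(r.Lower.Int32, r.Upper.Int32)
}
```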

### pgxtype

`LoadDataType` moved to `*Conn` as `LoadType`.

### Bytea

The `Bytea` and `GenericBinary` types have been replaced. Use the following instead (a short sketch follows the list):

* `[]byte` - For normal usage directly use `[]byte`.
* `DriverBytes` - Uses driver memory only available until the next database method call. Avoids a copy and an allocation.
* `PreallocBytes` - Uses a preallocated byte slice to avoid an allocation.
* `UndecodedBytes` - Avoids any decoding. Allows working with raw bytes.
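
A hedged sketch of scanning with `PreallocBytes`; the `blobs` table, the query, and the 64 KiB capacity are hypothetical:

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

// readPayload scans a bytea column into a preallocated buffer. When the
// buffer's capacity suffices, the value is decoded into it, avoiding a
// fresh allocation per call.
func readPayload(ctx context.Context, conn *pgx.Conn) ([]byte, error) {
	buf := make([]byte, 0, 64*1024)
	err := conn.QueryRow(ctx, "select payload from blobs where id = $1", 1).
		Scan((*pgtype.PreallocBytes)(&buf))
	return buf, err
}
```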

### Dropped lib/pq Support

`pgtype` previously supported and was tested against [lib/pq](https://github.com/lib/pq). While it will continue to work
in most cases, this is no longer supported.

### database/sql Scan

Previously, most `Scan` implementations would convert `[]byte` to `string` automatically to decode a text value. Now
only `string` is handled. This is to allow the possibility of future binary support in `database/sql` mode by
considering `[]byte` to be binary format and `string` text format. This change should have no effect for any use with
`pgx`. The previous behavior was only necessary for `lib/pq` compatibility.

Added `*Map.SQLScanner` to create a `sql.Scanner` for types such as `[]int32` and `Range[T]` that do not implement
`sql.Scanner` directly.
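
A hedged sketch of `*Map.SQLScanner` in `database/sql` mode; the connection string and query are placeholders:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5/pgtype"
	_ "github.com/jackc/pgx/v5/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://localhost/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	m := pgtype.NewMap()
	var numbers []int32
	// []int32 does not implement sql.Scanner itself; SQLScanner wraps it.
	err = db.QueryRow("select '{1,2,3}'::int4[]").Scan(m.SQLScanner(&numbers))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(numbers)
}
```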

### Number Type Fields Include Bit Size

`Int2`, `Int4`, `Int8`, `Float4`, `Float8`, and `Uint32` fields now include bit size. e.g. `Int` is renamed to `Int64`.
This matches the convention set by `database/sql`. In addition, for comparable types like `pgtype.Int8` and
`sql.NullInt64` the structures are identical. This means they can be directly converted one to another.
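
For example, because the struct layouts match, a plain Go type conversion works in either direction (a minimal sketch):

```go
package main

import (
	"database/sql"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	pgn := pgtype.Int8{Int64: 7, Valid: true}
	// Identical field names and types allow a direct conversion.
	sn := sql.NullInt64(pgn)
	back := pgtype.Int8(sn)
	fmt.Println(sn.Int64, back.Valid) // 7 true
}
```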

### 3rd Party Type Integrations

* Extracted integrations with https://github.com/shopspring/decimal and https://github.com/gofrs/uuid to https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid respectively. This trims the pgx dependency tree.

### Other Changes

* `Bit` and `Varbit` are both replaced by the `Bits` type.
* `CID`, `OID`, `OIDValue`, and `XID` are replaced by the `Uint32` type.
* `Hstore` is now defined as `map[string]*string`.
* `JSON` and `JSONB` types removed. Use `[]byte` or `string` directly.
* `QChar` type removed. Use `rune` or `byte` directly.
* `Inet` and `Cidr` types removed. Use `netip.Addr` and `netip.Prefix` directly. These types are more memory efficient than the previous `net.IPNet`.
* `Macaddr` type removed. Use `net.HardwareAddr` directly.
* Renamed `pgtype.ConnInfo` to `pgtype.Map`.
* Renamed `pgtype.DataType` to `pgtype.Type`.
* Renamed `pgtype.None` to `pgtype.Finite`.
* `RegisterType` now accepts a `*Type` instead of `Type`.
* Assorted array helper methods and types made private.

## stdlib

* Removed `AcquireConn` and `ReleaseConn` as that functionality has been built in since Go 1.13.

## Reduced Memory Usage by Reusing Read Buffers

Previously, the connection read buffer would allocate large chunks of memory and never reuse them. This allowed
transferring ownership to anything such as scanned values without incurring an additional allocation and memory copy.
However, this came at the cost of overall increased memory allocation size. Worse, it was also possible to pin large
chunks of memory by retaining a reference to a small value that originally came directly from the read buffer. Now
ownership remains with the read buffer and anything needing to retain a value must make a copy.

## Query Execution Modes

Control over automatic prepared statement caching and simple protocol use is now combined into the query execution mode.
See the documentation for `QueryExecMode`.
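
As a quick illustration, a minimal sketch of selecting a mode at connect time; the connection string is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	config, err := pgx.ParseConfig("postgres://localhost/mydb")
	if err != nil {
		log.Fatal(err)
	}
	// Use the simple protocol instead of automatic statement caching,
	// e.g. when connecting through a transaction-mode pooler.
	config.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol

	conn, err := pgx.ConnectConfig(context.Background(), config)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}
```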

## QueryRewriter Interface and NamedArgs

pgx now supports named arguments with the `NamedArgs` type. This is implemented via the new `QueryRewriter` interface, which
allows arbitrary rewriting of query SQL and arguments.
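
For instance, a minimal sketch; the `widgets` table and its columns are hypothetical:

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// findWidgets shows NamedArgs rewriting @-prefixed placeholders into
// positional arguments before the query is sent.
func findWidgets(ctx context.Context, conn *pgx.Conn) (pgx.Rows, error) {
	return conn.Query(ctx,
		"select id, name from widgets where color = @color and weight < @maxWeight",
		pgx.NamedArgs{"color": "blue", "maxWeight": 10},
	)
}
```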

## RowScanner Interface

The `RowScanner` interface allows a single argument to `Rows.Scan` to scan the entire row.

## Rows Result Helpers

* `CollectRows` and `RowTo*` functions simplify collecting results into a slice (see the sketch after this list).
* `CollectOneRow` collects one row using `RowTo*` functions.
* `ForEachRow` simplifies scanning each row and executing code using the scanned values. `ForEachRow` replaces `QueryFunc`.
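
A minimal sketch of `CollectRows`; the query is illustrative and `conn` is assumed to be an established `*pgx.Conn`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func collectNumbers(ctx context.Context, conn *pgx.Conn) {
	rows, _ := conn.Query(ctx, "select n from generate_series(1, 5) n")
	// CollectRows closes rows and surfaces any query error via err,
	// so the error from Query itself can be ignored here.
	numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(numbers) // [1 2 3 4 5]
}
```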

## Tx Helpers

Rather than every type that implemented `Begin` or `BeginTx` methods also needing to implement `BeginFunc` and
`BeginTxFunc`, these methods have been converted to functions that take a db that implements `Begin` or `BeginTx`.
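
A hedged sketch of the function form; the insert statement and `widgets` table are illustrative:

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// insertWidget runs fn inside a transaction: commit on nil error,
// rollback otherwise.
func insertWidget(ctx context.Context, conn *pgx.Conn) error {
	return pgx.BeginFunc(ctx, conn, func(tx pgx.Tx) error {
		_, err := tx.Exec(ctx, "insert into widgets (name) values ($1)", "sprocket")
		return err
	})
}
```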

## Improved Batch Query Ergonomics

Previously, the code for building a batch went in one place before the call to `SendBatch`, and the code for reading the
results went in another place after the call to `SendBatch`. This could make it difficult to match up a query and the code
that handles its results. Now `Queue` returns a `QueuedQuery`, which has methods `Query`, `QueryRow`, and `Exec` that can
be used to register a callback function that will handle the result. Callback functions are called automatically when
`BatchResults.Close` is called.
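
A minimal sketch of the callback style; the query and `widgets` table are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func countWidgets(ctx context.Context, conn *pgx.Conn) {
	batch := &pgx.Batch{}
	// The result callback sits next to the query it belongs to.
	batch.Queue("select count(*) from widgets").QueryRow(func(row pgx.Row) error {
		var n int64
		if err := row.Scan(&n); err != nil {
			return err
		}
		fmt.Println("widgets:", n)
		return nil
	})
	// Close reads the results and runs the registered callbacks.
	if err := conn.SendBatch(ctx, batch).Close(); err != nil {
		log.Fatal(err)
	}
}
```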

## SendBatch Uses Pipeline Mode When Appropriate

Previously, a batch with 10 unique parameterized statements executed 100 times would entail 11 network round trips: 1
for each prepare / describe and 1 for executing them all. Now pipeline mode is used to prepare / describe all statements
in a single network round trip, so it now takes only 2 round trips.

## Tracing and Logging

Internal logging support has been replaced with tracing hooks. This allows custom tracing integration with tools like OpenTelemetry. Package tracelog provides an adapter for pgx v4 loggers to act as a tracer.

All integrations with 3rd party loggers have been extracted to separate repositories. This trims the pgx dependency
tree.
121
vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
generated
vendored
Normal file

@@ -0,0 +1,121 @@
# Contributing

## Discuss Significant Changes

Before you invest a significant amount of time on a change, please create a discussion or issue describing your
proposal. This will help to ensure your proposed change has a reasonable chance of being merged.

## Avoid Dependencies

Adding a dependency is a big deal. While on occasion a new dependency may be accepted, the default answer to any change
that adds a dependency is no.

## Development Environment Setup

pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE`
environment variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or key-value pairs. In addition,
the standard `PG*` environment variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to
simplify environment variable handling.

### Using an Existing PostgreSQL Cluster

If you already have a PostgreSQL development server, this is the quickest way to start and run the majority of the pgx
test suite. Some tests that require server configuration changes (e.g. those testing different authentication methods)
will be skipped.

Create and set up a test database:

```
export PGDATABASE=pgx_test
createdb
psql -c 'create extension hstore;'
psql -c 'create extension ltree;'
psql -c 'create domain uint64 as numeric(20,0);'
```

Ensure a `postgres` user exists. This happens by default in normal PostgreSQL installs, but some installation methods
such as Homebrew do not.

```
createuser -s postgres
```

Ensure your `PGX_TEST_DATABASE` environment variable points to the database you just created and run the tests.

```
export PGX_TEST_DATABASE="host=/private/tmp database=pgx_test"
go test ./...
```

This will run the vast majority of the tests, but some tests will be skipped (e.g. those testing different connection methods).

### Creating a New PostgreSQL Cluster Exclusively for Testing

The following environment variables need to be set both for initial setup and whenever the tests are run (direnv is
highly recommended). Depending on your platform, you may need to change the host for `PGX_TEST_UNIX_SOCKET_CONN_STRING`.

```
export PGPORT=5015
export PGUSER=postgres
export PGDATABASE=pgx_test
export POSTGRESQL_DATA_DIR=postgresql

export PGX_TEST_DATABASE="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_UNIX_SOCKET_CONN_STRING="host=/private/tmp database=pgx_test"
export PGX_TEST_TCP_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_SCRAM_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_scram password=secret database=pgx_test"
export PGX_TEST_MD5_PASSWORD_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
export PGX_TEST_PLAIN_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_pw password=secret"
export PGX_TEST_TLS_CONN_STRING="host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem"
export PGX_SSL_PASSWORD=certpw
export PGX_TEST_TLS_CLIENT_CONN_STRING="host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem database=pgx_test sslcert=`pwd`/.testdb/pgx_sslcert.crt sslkey=`pwd`/.testdb/pgx_sslcert.key"
```

Create a new database cluster.

```
initdb --locale=en_US -E UTF-8 --username=postgres .testdb/$POSTGRESQL_DATA_DIR

echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf

cd .testdb

# Generate CA, server, and encrypted client certificates.
go run ../testsetup/generate_certs.go

# Copy certificates to server directory and set permissions.
cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
cp localhost.key $POSTGRESQL_DATA_DIR/server.key
chmod 600 $POSTGRESQL_DATA_DIR/server.key
cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt

cd ..
```

Start the new cluster. This will be necessary whenever you are running pgx tests.

```
postgres -D .testdb/$POSTGRESQL_DATA_DIR
```

Set up the test database in the new cluster.

```
createdb
psql --no-psqlrc -f testsetup/postgresql_setup.sql
```

### PgBouncer

There are tests specific to PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.

### Optional Tests

pgx supports multiple connection types and means of authentication. These tests are optional. They will only run if the
appropriate environment variables are set. In addition, there may be tests specific to particular PostgreSQL versions,
non-PostgreSQL servers (e.g. CockroachDB), or connection poolers (e.g. PgBouncer). Run `go test ./... -v | grep SKIP` to
see if any tests are being skipped.
||||
22
vendor/github.com/jackc/pgx/v5/LICENSE
generated
vendored
Normal file

@@ -0,0 +1,22 @@
Copyright (c) 2013-2021 Jack Christensen

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
191
vendor/github.com/jackc/pgx/v5/README.md
generated
vendored
Normal file

@@ -0,0 +1,191 @@
[Go Reference](https://pkg.go.dev/github.com/jackc/pgx/v5)
[Build Status](https://github.com/jackc/pgx/actions/workflows/ci.yml)

# pgx - PostgreSQL Driver and Toolkit

pgx is a pure Go driver and toolkit for PostgreSQL.

The pgx driver is a low-level, high performance interface that exposes PostgreSQL-specific features such as `LISTEN` /
`NOTIFY` and `COPY`. It also includes an adapter for the standard `database/sql` interface.

The toolkit component is a related set of packages that implement PostgreSQL functionality such as parsing the wire protocol
and type mapping between PostgreSQL and Go. These underlying packages can be used to implement alternative drivers,
proxies, load balancers, logical replication clients, etc.

## Example Usage

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	// urlExample := "postgres://username:password@localhost:5432/database_name"
	conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
		os.Exit(1)
	}
	defer conn.Close(context.Background())

	var name string
	var weight int64
	err = conn.QueryRow(context.Background(), "select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
	if err != nil {
		fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err)
		os.Exit(1)
	}

	fmt.Println(name, weight)
}
```

See the [getting started guide](https://github.com/jackc/pgx/wiki/Getting-started-with-pgx) for more information.

## Features

* Support for approximately 70 different PostgreSQL types
* Automatic statement preparation and caching
* Batch queries
* Single-round trip query mode
* Full TLS connection control
* Binary format support for custom types (allows for much quicker encoding/decoding)
* `COPY` protocol support for faster bulk data loads
* Tracing and logging support
* Connection pool with after-connect hook for arbitrary connection setup
* `LISTEN` / `NOTIFY`
* Conversion of PostgreSQL arrays to Go slice mappings for integers, floats, and strings
* `hstore` support
* `json` and `jsonb` support
* Maps `inet` and `cidr` PostgreSQL types to `netip.Addr` and `netip.Prefix`
* Large object support
* NULL mapping to pointer to pointer
* Supports `database/sql.Scanner` and `database/sql/driver.Valuer` interfaces for custom types
* Notice response handling
* Simulated nested transactions with savepoints

## Choosing Between the pgx and database/sql Interfaces

The pgx interface is faster. Many PostgreSQL specific features such as `LISTEN` / `NOTIFY` and `COPY` are not available
through the `database/sql` interface.

The pgx interface is recommended when:

1. The application only targets PostgreSQL.
2. No other libraries that require `database/sql` are in use.

It is also possible to use the `database/sql` interface and convert a connection to the lower-level pgx interface as needed.

## Testing

See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions.

## Architecture

See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.

## Supported Go and PostgreSQL Versions

pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.23 and higher and PostgreSQL 13 and higher. pgx is also tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).

## Version Policy

pgx follows semantic versioning for the documented public API on stable releases. `v5` is the latest stable major version.

## PGX Family Libraries

### [github.com/jackc/pglogrepl](https://github.com/jackc/pglogrepl)

pglogrepl provides functionality to act as a client for PostgreSQL logical replication.

### [github.com/jackc/pgmock](https://github.com/jackc/pgmock)

pgmock offers the ability to create a server that mocks the PostgreSQL wire protocol. This is used internally to test pgx by purposely inducing unusual errors. pgproto3 and pgmock together provide most of the foundational tooling required to implement a PostgreSQL proxy or MitM (such as for a custom connection pooler).

### [github.com/jackc/tern](https://github.com/jackc/tern)

tern is a stand-alone SQL migration system.

### [github.com/jackc/pgerrcode](https://github.com/jackc/pgerrcode)

pgerrcode contains constants for the PostgreSQL error codes.

## Adapters for 3rd Party Types

* [github.com/jackc/pgx-gofrs-uuid](https://github.com/jackc/pgx-gofrs-uuid)
* [github.com/jackc/pgx-shopspring-decimal](https://github.com/jackc/pgx-shopspring-decimal)
* [github.com/twpayne/pgx-geos](https://github.com/twpayne/pgx-geos) ([PostGIS](https://postgis.net/) and [GEOS](https://libgeos.org/) via [go-geos](https://github.com/twpayne/go-geos))
* [github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)

## Adapters for 3rd Party Tracers

* [github.com/jackhopner/pgx-xray-tracer](https://github.com/jackhopner/pgx-xray-tracer)
* [github.com/exaring/otelpgx](https://github.com/exaring/otelpgx)

## Adapters for 3rd Party Loggers

These adapters can be used with the tracelog package.

* [github.com/jackc/pgx-go-kit-log](https://github.com/jackc/pgx-go-kit-log)
* [github.com/jackc/pgx-log15](https://github.com/jackc/pgx-log15)
* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)

## 3rd Party Libraries with PGX Support

### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)

pgxmock is a mock library implementing pgx interfaces. pgxmock has one and only one purpose: to simulate pgx behavior in tests, without needing a real database connection.

### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)

Library for scanning data from a database into Go structs and more.

### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)

A carefully designed SQL client for making using SQL easier, more productive, and less error-prone in Go.

### [github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)

Adds GSSAPI / Kerberos authentication support.

### [github.com/wcamarao/pmx](https://github.com/wcamarao/pmx)

Explicit data mapping and scanning library for Go structs and slices.

### [github.com/stephenafamo/scan](https://github.com/stephenafamo/scan)

Type safe and flexible package for scanning database data into Go types. Supports structs, maps, slices and custom mapping functions.

### [github.com/z0ne-dev/mgx](https://github.com/z0ne-dev/mgx)

Code first migration library for native pgx (no database/sql abstraction).

### [github.com/amirsalarsafaei/sqlc-pgx-monitoring](https://github.com/amirsalarsafaei/sqlc-pgx-monitoring)

A database monitoring/metrics library for pgx and sqlc. Trace, log and monitor your sqlc query performance using OpenTelemetry.

### [github.com/nikolayk812/pgx-outbox](https://github.com/nikolayk812/pgx-outbox)

Simple Go implementation of the transactional outbox pattern for PostgreSQL using the jackc/pgx driver.

### [github.com/Arlandaren/pgxWrappy](https://github.com/Arlandaren/pgxWrappy)

Simplifies working with the pgx library, providing convenient scanning of nested structures.

### [github.com/KoNekoD/pgx-colon-query-rewriter](https://github.com/KoNekoD/pgx-colon-query-rewriter)

Implementation of a pgx query rewriter to use ':' instead of '@' in named query parameters.
18
vendor/github.com/jackc/pgx/v5/Rakefile
generated
vendored
Normal file

@@ -0,0 +1,18 @@
require "erb"

rule '.go' => '.go.erb' do |task|
  erb = ERB.new(File.read(task.source))
  File.write(task.name, "// Code generated from #{task.source}. DO NOT EDIT.\n\n" + erb.result(binding))
  sh "goimports", "-w", task.name
end

generated_code_files = [
  "pgtype/int.go",
  "pgtype/int_test.go",
  "pgtype/integration_benchmark_test.go",
  "pgtype/zeronull/int.go",
  "pgtype/zeronull/int_test.go"
]

desc "Generate code"
task generate: generated_code_files
Some files were not shown because too many files have changed in this diff.