Implement HTTP timeout configurations and enhance API documentation

- Added optional HTTP server timeout configurations (`HNHMAP_READ_TIMEOUT`, `HNHMAP_WRITE_TIMEOUT`, `HNHMAP_IDLE_TIMEOUT`) to `.env.example` and updated the server initialization in `main.go` to utilize these settings.
- Enhanced API documentation for the `rebuildZooms` endpoint to clarify its background processing and polling mechanism for status updates.
- Updated `configuration.md` to include new timeout environment variables for better configuration guidance.
- Improved error handling in the client for large request bodies, ensuring appropriate responses for oversized payloads.
This commit is contained in:
2026-03-04 11:59:28 +03:00
parent a3a4c0e896
commit dda35baeca
17 changed files with 396 additions and 73 deletions

View File

@@ -321,6 +321,7 @@ func (h *Handlers) APIAdminHideMarker(rw http.ResponseWriter, req *http.Request)
}
// APIAdminRebuildZooms handles POST /map/api/admin/rebuildZooms.
// It starts the rebuild in the background and returns 202 Accepted immediately.
func (h *Handlers) APIAdminRebuildZooms(rw http.ResponseWriter, req *http.Request) {
if !h.requireMethod(rw, req, http.MethodPost) {
return
@@ -328,11 +329,22 @@ func (h *Handlers) APIAdminRebuildZooms(rw http.ResponseWriter, req *http.Reques
if h.requireAdmin(rw, req) == nil {
return
}
if err := h.Admin.RebuildZooms(req.Context()); err != nil {
HandleServiceError(rw, err)
h.Admin.StartRebuildZooms()
rw.WriteHeader(http.StatusAccepted)
}
// APIAdminRebuildZoomsStatus handles GET /map/api/admin/rebuildZooms/status.
// Returns {"running": true|false} so the client can poll until the rebuild finishes.
func (h *Handlers) APIAdminRebuildZoomsStatus(rw http.ResponseWriter, req *http.Request) {
	if req.Method != http.MethodGet {
		JSONError(rw, http.StatusMethodNotAllowed, "method not allowed", "METHOD_NOT_ALLOWED")
		return
	}
	// Do not write any status code before the admin check: an early
	// rw.WriteHeader(http.StatusOK) would commit the response, turning
	// requireAdmin's error status into a no-op, and JSON below sets the
	// status itself.
	if h.requireAdmin(rw, req) == nil {
		return
	}
	running := h.Admin.RebuildZoomsRunning()
	JSON(rw, http.StatusOK, map[string]bool{"running": running})
}
// APIAdminExport handles GET /map/api/admin/export.
@@ -426,6 +438,8 @@ func (h *Handlers) APIAdminRoute(rw http.ResponseWriter, req *http.Request, path
h.APIAdminWipe(rw, req)
case path == "rebuildZooms":
h.APIAdminRebuildZooms(rw, req)
case path == "rebuildZooms/status":
h.APIAdminRebuildZoomsStatus(rw, req)
case path == "export":
h.APIAdminExport(rw, req)
case path == "merge":

View File

@@ -12,6 +12,9 @@ import (
"github.com/andyleap/hnh-map/internal/app/services"
)
// maxClientBodySize is the maximum size for position and marker update request bodies.
const maxClientBodySize = 2 * 1024 * 1024 // 2 MB

// clientPath splits a /client/<token>/<action> URL into the auth token
// (first capture group) and the action path (second group, e.g. "positionUpdate").
var clientPath = regexp.MustCompile(`client/([^/]+)/(.*)`)
// ClientRouter handles /client/* requests with token-based auth.
@@ -112,12 +115,16 @@ func (h *Handlers) clientGridUpload(rw http.ResponseWriter, req *http.Request) {
func (h *Handlers) clientPositionUpdate(rw http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
buf, err := io.ReadAll(req.Body)
buf, err := io.ReadAll(io.LimitReader(req.Body, maxClientBodySize+1))
if err != nil {
slog.Error("error reading position update", "error", err)
JSONError(rw, http.StatusBadRequest, "failed to read body", "BAD_REQUEST")
return
}
if len(buf) > maxClientBodySize {
JSONError(rw, http.StatusRequestEntityTooLarge, "request body too large", "PAYLOAD_TOO_LARGE")
return
}
if err := h.Client.UpdatePositions(req.Context(), buf); err != nil {
slog.Error("position update failed", "error", err)
HandleServiceError(rw, err)
@@ -126,12 +133,16 @@ func (h *Handlers) clientPositionUpdate(rw http.ResponseWriter, req *http.Reques
func (h *Handlers) clientMarkerUpdate(rw http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
buf, err := io.ReadAll(req.Body)
buf, err := io.ReadAll(io.LimitReader(req.Body, maxClientBodySize+1))
if err != nil {
slog.Error("error reading marker update", "error", err)
JSONError(rw, http.StatusBadRequest, "failed to read body", "BAD_REQUEST")
return
}
if len(buf) > maxClientBodySize {
JSONError(rw, http.StatusRequestEntityTooLarge, "request body too large", "PAYLOAD_TOO_LARGE")
return
}
if err := h.Client.UploadMarkers(req.Context(), buf); err != nil {
slog.Error("marker update failed", "error", err)
HandleServiceError(rw, err)

View File

@@ -1,6 +1,7 @@
package handlers_test
import (
"bytes"
"context"
"encoding/json"
"errors"
@@ -677,3 +678,38 @@ func TestClientRouter_InvalidToken(t *testing.T) {
t.Fatalf("expected 401, got %d", rr.Code)
}
}
// TestClientRouter_PositionUpdate_BodyTooLarge verifies that a position update
// body one byte over the 2MB limit is rejected with 413.
func TestClientRouter_PositionUpdate_BodyTooLarge(t *testing.T) {
	env := newTestEnv(t)
	env.createUser(t, "alice", "pass", app.Auths{app.AUTH_UPLOAD})

	tokens := env.auth.GenerateTokenForUser(context.Background(), "alice")
	if len(tokens) == 0 {
		t.Fatal("expected token")
	}

	// One byte past maxClientBodySize (2MB).
	oversized := bytes.Repeat([]byte("x"), 2*1024*1024+1)
	target := "/client/" + tokens[0] + "/positionUpdate"
	req := httptest.NewRequest(http.MethodPost, target, bytes.NewReader(oversized))
	req.Header.Set("Content-Type", "application/json")

	rec := httptest.NewRecorder()
	env.h.ClientRouter(rec, req)
	if rec.Code != http.StatusRequestEntityTooLarge {
		t.Fatalf("expected 413, got %d: %s", rec.Code, rec.Body.String())
	}
}
// TestClientRouter_MarkerUpdate_BodyTooLarge verifies that a marker update
// body one byte over the 2MB limit is rejected with 413.
func TestClientRouter_MarkerUpdate_BodyTooLarge(t *testing.T) {
	env := newTestEnv(t)
	env.createUser(t, "alice", "pass", app.Auths{app.AUTH_UPLOAD})

	tokens := env.auth.GenerateTokenForUser(context.Background(), "alice")
	if len(tokens) == 0 {
		t.Fatal("expected token")
	}

	// One byte past maxClientBodySize (2MB).
	oversized := bytes.Repeat([]byte("x"), 2*1024*1024+1)
	target := "/client/" + tokens[0] + "/markerUpdate"
	req := httptest.NewRequest(http.MethodPost, target, bytes.NewReader(oversized))
	req.Header.Set("Content-Type", "application/json")

	rec := httptest.NewRecorder()
	env.h.ClientRouter(rec, req)
	if rec.Code != http.StatusRequestEntityTooLarge {
		t.Fatalf("expected 413, got %d: %s", rec.Code, rec.Body.String())
	}
}

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"log/slog"
"strconv"
"sync"
"github.com/andyleap/hnh-map/internal/app"
"github.com/andyleap/hnh-map/internal/app/store"
@@ -17,6 +18,9 @@ import (
type AdminService struct {
	st     *store.Store // persistent store backing all admin operations
	mapSvc *MapService  // used to run zoom-level rebuilds

	rebuildMu      sync.Mutex // guards rebuildRunning
	rebuildRunning bool       // true while a background zoom rebuild is in progress
}
// NewAdminService creates an AdminService with the given store and map service.
@@ -372,3 +376,32 @@ func (s *AdminService) HideMarker(ctx context.Context, markerID string) error {
// RebuildZooms synchronously rebuilds all zoom tiles by delegating to the map
// service. For the non-blocking variant used by the HTTP API, see StartRebuildZooms.
func (s *AdminService) RebuildZooms(ctx context.Context) error {
	return s.mapSvc.RebuildZooms(ctx)
}
// StartRebuildZooms starts RebuildZooms in a goroutine and returns immediately.
// If a rebuild is already in progress the call is a no-op, so at most one
// rebuild runs at a time. Progress can be observed via RebuildZoomsRunning.
func (s *AdminService) StartRebuildZooms() {
	s.rebuildMu.Lock()
	if s.rebuildRunning {
		s.rebuildMu.Unlock()
		return
	}
	s.rebuildRunning = true
	s.rebuildMu.Unlock()
	go func() {
		// Always clear the running flag, even if the rebuild fails.
		defer func() {
			s.rebuildMu.Lock()
			s.rebuildRunning = false
			s.rebuildMu.Unlock()
		}()
		// context.Background(): the rebuild must outlive the HTTP request that triggered it.
		if err := s.mapSvc.RebuildZooms(context.Background()); err != nil {
			slog.Error("RebuildZooms background failed", "error", err)
		}
	}()
}
// RebuildZoomsRunning reports whether a background zoom rebuild is in progress.
func (s *AdminService) RebuildZoomsRunning() bool {
	s.rebuildMu.Lock()
	running := s.rebuildRunning
	s.rebuildMu.Unlock()
	return running
}

View File

@@ -35,14 +35,23 @@ func NewExportService(st *store.Store, mapSvc *MapService) *ExportService {
return &ExportService{st: st, mapSvc: mapSvc}
}
// Export writes all map data as a ZIP archive to the given writer.
func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
zw := zip.NewWriter(w)
defer zw.Close()
// exportEntry describes a single grid PNG to copy into the ZIP. Entries are
// collected inside a read-only View so the file copying can happen after the
// transaction has been released.
type exportEntry struct {
	ZipPath  string // path inside the archive, e.g. "1/grid1.png"
	FilePath string // absolute path of the source PNG on disk
}
return s.st.Update(ctx, func(tx *bbolt.Tx) error {
maps := map[int]mapData{}
gridMap := map[string]int{}
// Export writes all map data as a ZIP archive to the given writer.
// It uses a read-only View to collect data, then builds the ZIP outside the transaction
// so that the write lock is not held during file I/O.
func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
var maps map[int]mapData
var gridMap map[string]int
var filesToCopy []exportEntry
if err := s.st.View(ctx, func(tx *bbolt.Tx) error {
maps = map[int]mapData{}
gridMap = map[string]int{}
grids := tx.Bucket(store.BucketGrids)
if grids == nil {
@@ -54,6 +63,11 @@ func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
}
if err := grids.ForEach(func(k, v []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
gd := app.GridData{}
if err := json.Unmarshal(v, &gd); err != nil {
return err
@@ -84,17 +98,11 @@ func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
if err := json.Unmarshal(tdraw, &td); err != nil {
return err
}
fw, err := zw.Create(fmt.Sprintf("%d/%s.png", gd.Map, gd.ID))
if err != nil {
return err
}
f, err := os.Open(filepath.Join(s.mapSvc.GridStorage(), td.File))
if err != nil {
return err
}
_, err = io.Copy(fw, f)
f.Close()
return err
filesToCopy = append(filesToCopy, exportEntry{
ZipPath: fmt.Sprintf("%d/%s.png", gd.Map, gd.ID),
FilePath: filepath.Join(s.mapSvc.GridStorage(), td.File),
})
return nil
}); err != nil {
return err
}
@@ -104,6 +112,11 @@ func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
markersgrid := markersb.Bucket(store.BucketMarkersGrid)
if markersgrid != nil {
markersgrid.ForEach(func(k, v []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
marker := app.Marker{}
if json.Unmarshal(v, &marker) != nil {
return nil
@@ -115,16 +128,41 @@ func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
})
}
}
for mapid, md := range maps {
fw, err := zw.Create(fmt.Sprintf("%d/grids.json", mapid))
if err != nil {
return err
}
json.NewEncoder(fw).Encode(md)
}
return nil
})
}); err != nil {
return err
}
// Build ZIP outside the transaction so the write lock is not held during file I/O.
zw := zip.NewWriter(w)
defer zw.Close()
for _, e := range filesToCopy {
fw, err := zw.Create(e.ZipPath)
if err != nil {
return err
}
f, err := os.Open(e.FilePath)
if err != nil {
return err
}
_, err = io.Copy(fw, f)
f.Close()
if err != nil {
return err
}
}
for mapid, md := range maps {
fw, err := zw.Create(fmt.Sprintf("%d/grids.json", mapid))
if err != nil {
return err
}
if err := json.NewEncoder(fw).Encode(md); err != nil {
return err
}
}
return nil
}
// Merge imports map data from a ZIP file.

View File

@@ -184,6 +184,38 @@ func (s *MapService) GetTile(ctx context.Context, mapID int, c app.Coord, zoom i
return td
}
// getSubTiles loads the four child tiles (at zoom z-1) of parent coord c in a
// single read-only View. Slot order is (0,0), (1,0), (0,1), (1,1) so it lines
// up with the 2x2 composition loop in UpdateZoomLevel. Tiles that are missing
// or fail to unmarshal leave a nil slot; a failed View returns nil.
func (s *MapService) getSubTiles(ctx context.Context, mapid int, c app.Coord, z int) []*app.TileData {
	offsets := [4][2]int{{0, 0}, {1, 0}, {0, 1}, {1, 1}}
	keys := make([]string, 0, len(offsets))
	for _, off := range offsets {
		sub := app.Coord{X: c.X*2 + off[0], Y: c.Y*2 + off[1]}
		keys = append(keys, sub.Name())
	}

	var raw map[string][]byte
	err := s.st.View(ctx, func(tx *bbolt.Tx) error {
		raw = s.st.GetTiles(tx, mapid, z-1, keys)
		return nil
	})
	if err != nil {
		return nil
	}

	out := make([]*app.TileData, len(keys))
	for i, key := range keys {
		data, ok := raw[key]
		if !ok || len(data) == 0 {
			continue
		}
		var td app.TileData
		if json.Unmarshal(data, &td) == nil {
			out[i] = &td
		}
	}
	return out
}
// SaveTile persists a tile and broadcasts the update.
func (s *MapService) SaveTile(ctx context.Context, mapid int, c app.Coord, z int, f string, t int64) {
s.st.Update(ctx, func(tx *bbolt.Tx) error {
@@ -203,32 +235,28 @@ func (s *MapService) SaveTile(ctx context.Context, mapid int, c app.Coord, z int
})
}
// UpdateZoomLevel composes a zoom tile from 4 sub-tiles.
// UpdateZoomLevel composes a zoom tile from 4 sub-tiles (one View for all 4 tile reads).
func (s *MapService) UpdateZoomLevel(ctx context.Context, mapid int, c app.Coord, z int) {
subTiles := s.getSubTiles(ctx, mapid, c, z)
img := image.NewNRGBA(image.Rect(0, 0, app.GridSize, app.GridSize))
draw.Draw(img, img.Bounds(), image.Transparent, image.Point{}, draw.Src)
for x := 0; x <= 1; x++ {
for y := 0; y <= 1; y++ {
subC := c
subC.X *= 2
subC.Y *= 2
subC.X += x
subC.Y += y
td := s.GetTile(ctx, mapid, subC, z-1)
if td == nil || td.File == "" {
continue
}
subf, err := os.Open(filepath.Join(s.gridStorage, td.File))
if err != nil {
continue
}
subimg, _, err := image.Decode(subf)
subf.Close()
if err != nil {
continue
}
draw.BiLinear.Scale(img, image.Rect(50*x, 50*y, 50*x+50, 50*y+50), subimg, subimg.Bounds(), draw.Src, nil)
for i := 0; i < 4; i++ {
td := subTiles[i]
if td == nil || td.File == "" {
continue
}
x := i % 2
y := i / 2
subf, err := os.Open(filepath.Join(s.gridStorage, td.File))
if err != nil {
continue
}
subimg, _, err := image.Decode(subf)
subf.Close()
if err != nil {
continue
}
draw.BiLinear.Scale(img, image.Rect(50*x, 50*y, 50*x+50, 50*y+50), subimg, subimg.Bounds(), draw.Src, nil)
}
if err := os.MkdirAll(fmt.Sprintf("%s/%d/%d", s.gridStorage, mapid, z), 0755); err != nil {
slog.Error("failed to create zoom dir", "error", err)
@@ -266,6 +294,11 @@ func (s *MapService) RebuildZooms(ctx context.Context) error {
return nil
}
b.ForEach(func(k, v []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
grid := app.GridData{}
if err := json.Unmarshal(v, &grid); err != nil {
return err
@@ -282,6 +315,9 @@ func (s *MapService) RebuildZooms(ctx context.Context) error {
}
for g, id := range saveGrid {
if ctx.Err() != nil {
return ctx.Err()
}
f := fmt.Sprintf("%s/grids/%s.png", s.gridStorage, id)
if _, err := os.Stat(f); err != nil {
continue
@@ -289,6 +325,9 @@ func (s *MapService) RebuildZooms(ctx context.Context) error {
s.SaveTile(ctx, g.m, g.c, 0, fmt.Sprintf("grids/%s.png", id), time.Now().UnixNano())
}
for z := 1; z <= app.MaxZoomLevel; z++ {
if ctx.Err() != nil {
return ctx.Err()
}
process := needProcess
needProcess = map[zoomproc]struct{}{}
for p := range process {
@@ -327,6 +366,11 @@ func (s *MapService) GetAllTileCache(ctx context.Context) []TileCache {
var cache []TileCache
s.st.View(ctx, func(tx *bbolt.Tx) error {
return s.st.ForEachTile(tx, func(mapK, zoomK, coordK, v []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
td := app.TileData{}
if err := json.Unmarshal(v, &td); err != nil {
return err

View File

@@ -17,7 +17,9 @@ func New(db *bbolt.DB) *Store {
return &Store{db: db}
}
// View runs fn in a read-only transaction. Checks context before starting.
// View runs fn in a read-only transaction. It checks context before starting.
// Long-running callbacks (e.g. large ForEach or ForEachTile) should check ctx.Done()
// periodically and return ctx.Err() to abort early when the context is cancelled.
func (s *Store) View(ctx context.Context, fn func(tx *bbolt.Tx) error) error {
select {
case <-ctx.Done():
@@ -27,7 +29,8 @@ func (s *Store) View(ctx context.Context, fn func(tx *bbolt.Tx) error) error {
}
}
// Update runs fn in a read-write transaction. Checks context before starting.
// Update runs fn in a read-write transaction. It checks context before starting.
// Long-running callbacks should check ctx.Done() periodically and return ctx.Err() to abort.
func (s *Store) Update(ctx context.Context, fn func(tx *bbolt.Tx) error) error {
select {
case <-ctx.Done():
@@ -284,6 +287,30 @@ func (s *Store) GetTile(tx *bbolt.Tx, mapID, zoom int, coordKey string) []byte {
return zoomB.Get([]byte(coordKey))
}
// GetTiles returns raw JSON for multiple tiles in the same map/zoom in one
// transaction. Keys that are not found are omitted from the result. Coord keys
// are in the form "x_y". If any level of the bucket hierarchy is missing, an
// empty map is returned.
func (s *Store) GetTiles(tx *bbolt.Tx, mapID, zoom int, coordKeys []string) map[string][]byte {
	result := make(map[string][]byte, len(coordKeys))

	// Descend tiles -> map -> zoom; any missing bucket means no tiles exist here.
	root := tx.Bucket(BucketTiles)
	if root == nil {
		return result
	}
	byMap := root.Bucket([]byte(strconv.Itoa(mapID)))
	if byMap == nil {
		return result
	}
	byZoom := byMap.Bucket([]byte(strconv.Itoa(zoom)))
	if byZoom == nil {
		return result
	}

	for _, key := range coordKeys {
		if raw := byZoom.Get([]byte(key)); raw != nil {
			result[key] = raw
		}
	}
	return result
}
// PutTile stores a tile entry (creates nested buckets as needed).
func (s *Store) PutTile(tx *bbolt.Tx, mapID, zoom int, coordKey string, raw []byte) error {
tiles, err := tx.CreateBucketIfNotExists(BucketTiles)