- Updated API documentation for the `rebuildZooms` endpoint to clarify its long execution time and response behavior. - Modified MapView component to manage tile cache invalidation after rebuilding zoom levels, ensuring fresh tile display. - Introduced a new composable for handling tile cache invalidation state after admin actions. - Enhanced character icon creation to reflect ownership status with distinct colors. - Improved loading state handling in various components for better user experience during data fetching.
382 lines
9.1 KiB
Go
382 lines
9.1 KiB
Go
package services
|
|
|
|
import (
|
|
"archive/zip"
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"log/slog"
|
|
"os"
|
|
"path/filepath"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/andyleap/hnh-map/internal/app"
|
|
"github.com/andyleap/hnh-map/internal/app/store"
|
|
"go.etcd.io/bbolt"
|
|
)
|
|
|
|
// mapData is the per-map payload serialized as "grids.json" inside an
// export archive, and decoded again on merge.
type mapData struct {
	// Grids maps a grid coordinate name (as produced by Coord.Name(),
	// parsed back with the "%d_%d" format on import) to its grid ID.
	Grids map[string]string
	// Markers groups markers by the grid ID they belong to.
	Markers map[string][]app.Marker
}
|
|
|
|
// ExportService handles map data export and import (merge).
type ExportService struct {
	// st is the backing bbolt-based store holding grids, tiles,
	// markers, maps, and config buckets.
	st *store.Store
	// mapSvc provides tile file storage paths, tile persistence,
	// zoom rebuilding, and merge reporting.
	mapSvc *MapService
}
|
|
|
|
// NewExportService creates an ExportService with the given store and map service.
|
|
func NewExportService(st *store.Store, mapSvc *MapService) *ExportService {
|
|
return &ExportService{st: st, mapSvc: mapSvc}
|
|
}
|
|
|
|
// Export writes all map data as a ZIP archive to the given writer.
|
|
func (s *ExportService) Export(ctx context.Context, w io.Writer) error {
|
|
zw := zip.NewWriter(w)
|
|
defer zw.Close()
|
|
|
|
return s.st.Update(ctx, func(tx *bbolt.Tx) error {
|
|
maps := map[int]mapData{}
|
|
gridMap := map[string]int{}
|
|
|
|
grids := tx.Bucket(store.BucketGrids)
|
|
if grids == nil {
|
|
return nil
|
|
}
|
|
tiles := tx.Bucket(store.BucketTiles)
|
|
if tiles == nil {
|
|
return nil
|
|
}
|
|
|
|
if err := grids.ForEach(func(k, v []byte) error {
|
|
gd := app.GridData{}
|
|
if err := json.Unmarshal(v, &gd); err != nil {
|
|
return err
|
|
}
|
|
md, ok := maps[gd.Map]
|
|
if !ok {
|
|
md = mapData{
|
|
Grids: map[string]string{},
|
|
Markers: map[string][]app.Marker{},
|
|
}
|
|
maps[gd.Map] = md
|
|
}
|
|
md.Grids[gd.Coord.Name()] = gd.ID
|
|
gridMap[gd.ID] = gd.Map
|
|
mapb := tiles.Bucket([]byte(strconv.Itoa(gd.Map)))
|
|
if mapb == nil {
|
|
return nil
|
|
}
|
|
zoom := mapb.Bucket([]byte("0"))
|
|
if zoom == nil {
|
|
return nil
|
|
}
|
|
tdraw := zoom.Get([]byte(gd.Coord.Name()))
|
|
if tdraw == nil {
|
|
return nil
|
|
}
|
|
td := app.TileData{}
|
|
if err := json.Unmarshal(tdraw, &td); err != nil {
|
|
return err
|
|
}
|
|
fw, err := zw.Create(fmt.Sprintf("%d/%s.png", gd.Map, gd.ID))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
f, err := os.Open(filepath.Join(s.mapSvc.GridStorage(), td.File))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
_, err = io.Copy(fw, f)
|
|
f.Close()
|
|
return err
|
|
}); err != nil {
|
|
return err
|
|
}
|
|
|
|
markersb := tx.Bucket(store.BucketMarkers)
|
|
if markersb != nil {
|
|
markersgrid := markersb.Bucket(store.BucketMarkersGrid)
|
|
if markersgrid != nil {
|
|
markersgrid.ForEach(func(k, v []byte) error {
|
|
marker := app.Marker{}
|
|
if json.Unmarshal(v, &marker) != nil {
|
|
return nil
|
|
}
|
|
if _, ok := maps[gridMap[marker.GridID]]; ok {
|
|
maps[gridMap[marker.GridID]].Markers[marker.GridID] = append(maps[gridMap[marker.GridID]].Markers[marker.GridID], marker)
|
|
}
|
|
return nil
|
|
})
|
|
}
|
|
}
|
|
|
|
for mapid, md := range maps {
|
|
fw, err := zw.Create(fmt.Sprintf("%d/grids.json", mapid))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
json.NewEncoder(fw).Encode(md)
|
|
}
|
|
return nil
|
|
})
|
|
}
|
|
|
|
// Merge imports map data from a ZIP file.
|
|
func (s *ExportService) Merge(ctx context.Context, zr *zip.Reader) error {
|
|
var ops []TileOp
|
|
newTiles := map[string]struct{}{}
|
|
|
|
if err := s.st.Update(ctx, func(tx *bbolt.Tx) error {
|
|
grids, err := tx.CreateBucketIfNotExists(store.BucketGrids)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
tiles, err := tx.CreateBucketIfNotExists(store.BucketTiles)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
mb, err := tx.CreateBucketIfNotExists(store.BucketMarkers)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
mgrid, err := mb.CreateBucketIfNotExists(store.BucketMarkersGrid)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
idB, err := mb.CreateBucketIfNotExists(store.BucketMarkersID)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
configb, err := tx.CreateBucketIfNotExists(store.BucketConfig)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
mapB, err := tx.CreateBucketIfNotExists(store.BucketMaps)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, fhdr := range zr.File {
|
|
if strings.HasSuffix(fhdr.Name, ".json") {
|
|
if err := s.processMergeJSON(fhdr, grids, tiles, mapB, configb, mgrid, idB, &ops); err != nil {
|
|
return err
|
|
}
|
|
} else if strings.HasSuffix(fhdr.Name, ".png") {
|
|
if err := os.MkdirAll(filepath.Join(s.mapSvc.GridStorage(), "grids"), 0755); err != nil {
|
|
return err
|
|
}
|
|
f, err := os.Create(filepath.Join(s.mapSvc.GridStorage(), "grids", filepath.Base(fhdr.Name)))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
r, err := fhdr.Open()
|
|
if err != nil {
|
|
f.Close()
|
|
return err
|
|
}
|
|
io.Copy(f, r)
|
|
r.Close()
|
|
f.Close()
|
|
newTiles[strings.TrimSuffix(filepath.Base(fhdr.Name), ".png")] = struct{}{}
|
|
}
|
|
}
|
|
|
|
for gid := range newTiles {
|
|
gridRaw := grids.Get([]byte(gid))
|
|
if gridRaw != nil {
|
|
gd := app.GridData{}
|
|
json.Unmarshal(gridRaw, &gd)
|
|
ops = append(ops, TileOp{
|
|
MapID: gd.Map,
|
|
X: gd.Coord.X,
|
|
Y: gd.Coord.Y,
|
|
File: filepath.Join("grids", gid+".png"),
|
|
})
|
|
}
|
|
}
|
|
return nil
|
|
}); err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, op := range ops {
|
|
s.mapSvc.SaveTile(ctx, op.MapID, app.Coord{X: op.X, Y: op.Y}, 0, op.File, time.Now().UnixNano())
|
|
}
|
|
return s.mapSvc.RebuildZooms(ctx)
|
|
}
|
|
|
|
func (s *ExportService) processMergeJSON(
|
|
fhdr *zip.File,
|
|
grids, tiles, mapB, configb, mgrid, idB *bbolt.Bucket,
|
|
ops *[]TileOp,
|
|
) error {
|
|
f, err := fhdr.Open()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer f.Close()
|
|
|
|
md := mapData{}
|
|
if err := json.NewDecoder(f).Decode(&md); err != nil {
|
|
return err
|
|
}
|
|
|
|
for _, ms := range md.Markers {
|
|
for _, mraw := range ms {
|
|
key := []byte(fmt.Sprintf("%s_%d_%d", mraw.GridID, mraw.Position.X, mraw.Position.Y))
|
|
if mgrid.Get(key) != nil {
|
|
continue
|
|
}
|
|
img := mraw.Image
|
|
if img == "" {
|
|
img = "gfx/terobjs/mm/custom"
|
|
}
|
|
id, err := idB.NextSequence()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
idKey := []byte(strconv.Itoa(int(id)))
|
|
m := app.Marker{
|
|
Name: mraw.Name,
|
|
ID: int(id),
|
|
GridID: mraw.GridID,
|
|
Position: app.Position{X: mraw.Position.X, Y: mraw.Position.Y},
|
|
Image: img,
|
|
}
|
|
raw, _ := json.Marshal(m)
|
|
mgrid.Put(key, raw)
|
|
idB.Put(idKey, key)
|
|
}
|
|
}
|
|
|
|
newGrids := map[app.Coord]string{}
|
|
existingMaps := map[int]struct{ X, Y int }{}
|
|
for k, v := range md.Grids {
|
|
c := app.Coord{}
|
|
if _, err := fmt.Sscanf(k, "%d_%d", &c.X, &c.Y); err != nil {
|
|
return err
|
|
}
|
|
newGrids[c] = v
|
|
gridRaw := grids.Get([]byte(v))
|
|
if gridRaw != nil {
|
|
gd := app.GridData{}
|
|
json.Unmarshal(gridRaw, &gd)
|
|
existingMaps[gd.Map] = struct{ X, Y int }{gd.Coord.X - c.X, gd.Coord.Y - c.Y}
|
|
}
|
|
}
|
|
|
|
if len(existingMaps) == 0 {
|
|
seq, err := mapB.NextSequence()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
mi := app.MapInfo{
|
|
ID: int(seq),
|
|
Name: strconv.Itoa(int(seq)),
|
|
Hidden: configb.Get([]byte("defaultHide")) != nil,
|
|
}
|
|
raw, _ := json.Marshal(mi)
|
|
if err = mapB.Put([]byte(strconv.Itoa(int(seq))), raw); err != nil {
|
|
return err
|
|
}
|
|
for c, grid := range newGrids {
|
|
cur := app.GridData{ID: grid, Map: int(seq), Coord: c}
|
|
raw, err := json.Marshal(cur)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
grids.Put([]byte(grid), raw)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
mapid := -1
|
|
offset := struct{ X, Y int }{}
|
|
for id, off := range existingMaps {
|
|
mi := app.MapInfo{}
|
|
mraw := mapB.Get([]byte(strconv.Itoa(id)))
|
|
if mraw != nil {
|
|
json.Unmarshal(mraw, &mi)
|
|
}
|
|
if mi.Priority {
|
|
mapid = id
|
|
offset = off
|
|
break
|
|
}
|
|
if id < mapid || mapid == -1 {
|
|
mapid = id
|
|
offset = off
|
|
}
|
|
}
|
|
|
|
for c, grid := range newGrids {
|
|
if grids.Get([]byte(grid)) != nil {
|
|
continue
|
|
}
|
|
cur := app.GridData{
|
|
ID: grid,
|
|
Map: mapid,
|
|
Coord: app.Coord{X: c.X + offset.X, Y: c.Y + offset.Y},
|
|
}
|
|
raw, err := json.Marshal(cur)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
grids.Put([]byte(grid), raw)
|
|
}
|
|
|
|
if len(existingMaps) > 1 {
|
|
grids.ForEach(func(k, v []byte) error {
|
|
gd := app.GridData{}
|
|
json.Unmarshal(v, &gd)
|
|
if gd.Map == mapid {
|
|
return nil
|
|
}
|
|
if merge, ok := existingMaps[gd.Map]; ok {
|
|
var td *app.TileData
|
|
mapb, err := tiles.CreateBucketIfNotExists([]byte(strconv.Itoa(gd.Map)))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
zoom, err := mapb.CreateBucketIfNotExists([]byte(strconv.Itoa(0)))
|
|
if err != nil {
|
|
return err
|
|
}
|
|
tileraw := zoom.Get([]byte(gd.Coord.Name()))
|
|
if tileraw != nil {
|
|
json.Unmarshal(tileraw, &td)
|
|
}
|
|
|
|
gd.Map = mapid
|
|
gd.Coord.X += offset.X - merge.X
|
|
gd.Coord.Y += offset.Y - merge.Y
|
|
raw, _ := json.Marshal(gd)
|
|
if td != nil {
|
|
*ops = append(*ops, TileOp{
|
|
MapID: mapid,
|
|
X: gd.Coord.X,
|
|
Y: gd.Coord.Y,
|
|
File: td.File,
|
|
})
|
|
}
|
|
grids.Put(k, raw)
|
|
}
|
|
return nil
|
|
})
|
|
}
|
|
for mergeid, merge := range existingMaps {
|
|
if mapid == mergeid {
|
|
continue
|
|
}
|
|
mapB.Delete([]byte(strconv.Itoa(mergeid)))
|
|
slog.Info("reporting merge", "from", mergeid, "to", mapid)
|
|
s.mapSvc.ReportMerge(mergeid, mapid, app.Coord{X: offset.X - merge.X, Y: offset.Y - merge.Y})
|
|
}
|
|
return nil
|
|
}
|