feat(kc): checkout packing + ETag caching (Phase 2)
Implements issue #142 — .kc checkout pipeline that repacks silo/ entries with current DB state before serving downloads.

When a client downloads a .kc file via GET /api/items/{pn}/file/{rev}, the server now:
1. Reads the file from storage into memory
2. Checks for a silo/ directory (plain .fcstd files bypass packing)
3. Repacks silo/ entries with current item_metadata + revision history
4. Streams the repacked ZIP to the client

New files:
- internal/kc/pack.go: Pack() replaces silo/ entries in the ZIP, preserving all non-silo entries (FreeCAD files, thumbnails) with original compression and timestamps. HasSiloDir() for lightweight detection.
- internal/api/pack_handlers.go: packKCFile server helper, computeETag, canSkipRepack lazy optimization.

ETag caching:
- ETag computed from revision_number + metadata.updated_at
- If-None-Match support returns 304 Not Modified before reading storage
- Cache-Control: private, must-revalidate

Lazy packing optimization:
- Skips the repack if revision_hash matches and metadata is unchanged since upload

Phase 2 packs: manifest.json, metadata.json, history.json, dependencies.json (empty []). Approvals, macros, and jobs are deferred to Phases 3-5.

Closes #142
This commit is contained in:
@@ -5,6 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -19,6 +20,7 @@ import (
|
|||||||
"github.com/kindredsystems/silo/internal/config"
|
"github.com/kindredsystems/silo/internal/config"
|
||||||
"github.com/kindredsystems/silo/internal/db"
|
"github.com/kindredsystems/silo/internal/db"
|
||||||
"github.com/kindredsystems/silo/internal/jobdef"
|
"github.com/kindredsystems/silo/internal/jobdef"
|
||||||
|
"github.com/kindredsystems/silo/internal/kc"
|
||||||
"github.com/kindredsystems/silo/internal/modules"
|
"github.com/kindredsystems/silo/internal/modules"
|
||||||
"github.com/kindredsystems/silo/internal/partnum"
|
"github.com/kindredsystems/silo/internal/partnum"
|
||||||
"github.com/kindredsystems/silo/internal/schema"
|
"github.com/kindredsystems/silo/internal/schema"
|
||||||
@@ -1662,6 +1664,7 @@ func (s *Server) HandleUploadFile(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// HandleDownloadFile downloads the file for a specific revision.
|
// HandleDownloadFile downloads the file for a specific revision.
|
||||||
|
// For .kc files, silo/ entries are repacked with current DB state.
|
||||||
func (s *Server) HandleDownloadFile(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) HandleDownloadFile(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
partNumber := chi.URLParam(r, "partNumber")
|
partNumber := chi.URLParam(r, "partNumber")
|
||||||
@@ -1716,18 +1719,23 @@ func (s *Server) HandleDownloadFile(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get file from storage
|
// ETag: computed from revision + metadata freshness.
|
||||||
var reader interface {
|
meta, _ := s.metadata.Get(ctx, item.ID) // nil is ok (plain .fcstd)
|
||||||
Read(p []byte) (n int, err error)
|
etag := computeETag(revision, meta)
|
||||||
Close() error
|
|
||||||
|
if match := r.Header.Get("If-None-Match"); match == etag {
|
||||||
|
w.Header().Set("ETag", etag)
|
||||||
|
w.WriteHeader(http.StatusNotModified)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get file from storage
|
||||||
|
var reader io.ReadCloser
|
||||||
if revision.FileVersion != nil && *revision.FileVersion != "" {
|
if revision.FileVersion != nil && *revision.FileVersion != "" {
|
||||||
reader, err = s.storage.GetVersion(ctx, *revision.FileKey, *revision.FileVersion)
|
reader, err = s.storage.GetVersion(ctx, *revision.FileKey, *revision.FileVersion)
|
||||||
} else {
|
} else {
|
||||||
reader, err = s.storage.Get(ctx, *revision.FileKey)
|
reader, err = s.storage.Get(ctx, *revision.FileKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Error().Err(err).Str("key", *revision.FileKey).Msg("failed to get file")
|
s.logger.Error().Err(err).Str("key", *revision.FileKey).Msg("failed to get file")
|
||||||
writeError(w, http.StatusInternalServerError, "download_failed", err.Error())
|
writeError(w, http.StatusInternalServerError, "download_failed", err.Error())
|
||||||
@@ -1735,28 +1743,37 @@ func (s *Server) HandleDownloadFile(w http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
|
|
||||||
|
// Read entire file for potential .kc repacking.
|
||||||
|
data, err := io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
s.logger.Error().Err(err).Msg("failed to read file")
|
||||||
|
writeError(w, http.StatusInternalServerError, "download_failed", "Failed to read file")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Repack silo/ entries for .kc files with indexed metadata.
|
||||||
|
output := data
|
||||||
|
if meta != nil {
|
||||||
|
if hasSilo, chkErr := kc.HasSiloDir(data); chkErr == nil && hasSilo {
|
||||||
|
if !canSkipRepack(revision, meta) {
|
||||||
|
if packed, packErr := s.packKCFile(ctx, data, item, revision, meta); packErr != nil {
|
||||||
|
s.logger.Warn().Err(packErr).Str("part_number", partNumber).Msg("kc: packing failed, serving original")
|
||||||
|
} else {
|
||||||
|
output = packed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Set response headers
|
// Set response headers
|
||||||
filename := partNumber + "_rev" + strconv.Itoa(revNum) + ".FCStd"
|
filename := partNumber + "_rev" + strconv.Itoa(revNum) + ".FCStd"
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
w.Header().Set("Content-Type", "application/octet-stream")
|
||||||
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
|
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
|
||||||
if revision.FileSize != nil {
|
w.Header().Set("Content-Length", strconv.Itoa(len(output)))
|
||||||
w.Header().Set("Content-Length", strconv.FormatInt(*revision.FileSize, 10))
|
w.Header().Set("ETag", etag)
|
||||||
}
|
w.Header().Set("Cache-Control", "private, must-revalidate")
|
||||||
|
|
||||||
// Stream file to response
|
w.Write(output)
|
||||||
buf := make([]byte, 32*1024)
|
|
||||||
for {
|
|
||||||
n, readErr := reader.Read(buf)
|
|
||||||
if n > 0 {
|
|
||||||
if _, writeErr := w.Write(buf[:n]); writeErr != nil {
|
|
||||||
s.logger.Error().Err(writeErr).Msg("failed to write response")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if readErr != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleDownloadLatestFile downloads the file for the latest revision.
|
// HandleDownloadLatestFile downloads the file for the latest revision.
|
||||||
|
|||||||
97
internal/api/pack_handlers.go
Normal file
97
internal/api/pack_handlers.go
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/kindredsystems/silo/internal/db"
|
||||||
|
"github.com/kindredsystems/silo/internal/kc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// packKCFile gathers current DB state (manifest, metadata, revision history)
// and repacks the silo/ entries of a .kc ZIP blob via kc.Pack.
//
// data is the raw ZIP read from storage; item and meta are the DB rows for
// the part being downloaded. meta must be non-nil (the caller only invokes
// this inside a meta != nil branch). rev is currently unused here — the
// skip decision that consumes it happens in canSkipRepack before this call.
// Returns the repacked ZIP bytes, or an error (in which case the caller
// falls back to serving the original bytes).
func (s *Server) packKCFile(ctx context.Context, data []byte, item *db.Item, rev *db.Revision, meta *db.ItemMetadata) ([]byte, error) {
	// silo/manifest.json: identity and provenance of this .kc file.
	manifest := &kc.Manifest{
		UUID:         item.ID,
		KCVersion:    derefStr(meta.KCVersion, "1.0"), // default when never recorded
		RevisionHash: derefStr(meta.RevisionHash, ""),
		SiloInstance: derefStr(meta.SiloInstance, ""),
	}

	// silo/metadata.json: indexed schema fields, tags, and lifecycle state.
	metadata := &kc.Metadata{
		SchemaName:     derefStr(meta.SchemaName, ""),
		Tags:           meta.Tags,
		LifecycleState: meta.LifecycleState,
		Fields:         meta.Fields,
	}

	// Build history from last 20 revisions.
	// NOTE(review): revisions[:limit] takes the FIRST 20 entries returned by
	// GetRevisions — this is only the "last 20" if GetRevisions orders
	// newest-first. Confirm the query's ORDER BY matches that assumption.
	revisions, err := s.items.GetRevisions(ctx, item.ID)
	if err != nil {
		return nil, fmt.Errorf("getting revisions: %w", err)
	}
	limit := 20
	if len(revisions) < limit {
		limit = len(revisions)
	}
	history := make([]kc.HistoryEntry, limit)
	for i, r := range revisions[:limit] {
		// Force nil label slices to [] so history.json encodes [] not null.
		labels := r.Labels
		if labels == nil {
			labels = []string{}
		}
		history[i] = kc.HistoryEntry{
			RevisionNumber: r.RevisionNumber,
			CreatedAt:      r.CreatedAt.UTC().Format(time.RFC3339),
			CreatedBy:      r.CreatedBy,
			Comment:        r.Comment,
			Status:         r.Status,
			Labels:         labels,
		}
	}

	input := &kc.PackInput{
		Manifest:     manifest,
		Metadata:     metadata,
		History:      history,
		Dependencies: []any{}, // empty for Phase 2
	}

	return kc.Pack(data, input)
}
|
||||||
|
|
||||||
|
// computeETag generates a quoted ETag from the revision number and metadata freshness.
|
||||||
|
func computeETag(rev *db.Revision, meta *db.ItemMetadata) string {
|
||||||
|
var ts int64
|
||||||
|
if meta != nil {
|
||||||
|
ts = meta.UpdatedAt.UnixNano()
|
||||||
|
} else {
|
||||||
|
ts = rev.CreatedAt.UnixNano()
|
||||||
|
}
|
||||||
|
raw := fmt.Sprintf("%d:%d", rev.RevisionNumber, ts)
|
||||||
|
h := sha256.Sum256([]byte(raw))
|
||||||
|
return `"` + hex.EncodeToString(h[:8]) + `"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// canSkipRepack returns true if the stored blob already has up-to-date silo/ data.
|
||||||
|
func canSkipRepack(rev *db.Revision, meta *db.ItemMetadata) bool {
|
||||||
|
if meta == nil {
|
||||||
|
return true // no metadata row = plain .fcstd
|
||||||
|
}
|
||||||
|
if meta.RevisionHash != nil && rev.FileChecksum != nil &&
|
||||||
|
*meta.RevisionHash == *rev.FileChecksum &&
|
||||||
|
meta.UpdatedAt.Before(rev.CreatedAt) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// derefStr returns the value of a *string pointer, or fallback if nil.
func derefStr(p *string, fallback string) string {
	if p == nil {
		return fallback
	}
	return *p
}
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
// Package kc extracts and parses the silo/ metadata directory from .kc files.
|
// Package kc extracts and parses the silo/ metadata directory from .kc files.
|
||||||
//
|
//
|
||||||
// A .kc file is a ZIP archive (superset of .fcstd) that contains a silo/
|
// A .kc file is a ZIP archive (superset of .fcstd) that contains a silo/
|
||||||
// directory with JSON metadata entries. This package handles extraction only —
|
// directory with JSON metadata entries. This package handles extraction and
|
||||||
// no database or HTTP dependencies.
|
// packing — no database or HTTP dependencies.
|
||||||
package kc
|
package kc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -36,6 +36,25 @@ type ExtractResult struct {
|
|||||||
Metadata *Metadata
|
Metadata *Metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HistoryEntry represents one entry in silo/history.json.
type HistoryEntry struct {
	RevisionNumber int      `json:"revision_number"`
	CreatedAt      string   `json:"created_at"` // pre-formatted timestamp string (producers use RFC 3339 UTC)
	CreatedBy      *string  `json:"created_by,omitempty"`
	Comment        *string  `json:"comment,omitempty"`
	Status         string   `json:"status"`
	Labels         []string `json:"labels"` // pass a non-nil slice so JSON emits [] rather than null
}

// PackInput holds all the data needed to repack silo/ entries in a .kc file.
// Each field is optional — nil/empty means the entry is omitted from the ZIP.
type PackInput struct {
	Manifest     *Manifest
	Metadata     *Metadata
	History      []HistoryEntry
	Dependencies []any // empty [] for Phase 2; structured types in Phase 3+
}
|
||||||
|
|
||||||
// Extract opens a ZIP archive from data and parses the silo/ directory.
|
// Extract opens a ZIP archive from data and parses the silo/ directory.
|
||||||
// Returns nil, nil if no silo/ directory is found (plain .fcstd file).
|
// Returns nil, nil if no silo/ directory is found (plain .fcstd file).
|
||||||
// Returns nil, error if silo/ entries exist but fail to parse.
|
// Returns nil, error if silo/ entries exist but fail to parse.
|
||||||
|
|||||||
131
internal/kc/pack.go
Normal file
131
internal/kc/pack.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package kc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HasSiloDir opens a ZIP archive and returns true if any entry starts with "silo/".
|
||||||
|
// This is a lightweight check used to short-circuit before gathering DB data.
|
||||||
|
func HasSiloDir(data []byte) (bool, error) {
|
||||||
|
r, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("kc: open zip: %w", err)
|
||||||
|
}
|
||||||
|
for _, f := range r.File {
|
||||||
|
if f.Name == "silo/" || strings.HasPrefix(f.Name, "silo/") {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pack takes original ZIP file bytes and a PackInput, and returns new ZIP bytes
|
||||||
|
// with all silo/ entries replaced by the data from input. Non-silo entries
|
||||||
|
// (FreeCAD Document.xml, thumbnails, etc.) are copied verbatim with their
|
||||||
|
// original compression method and timestamps preserved.
|
||||||
|
//
|
||||||
|
// If the original ZIP contains no silo/ directory, the original bytes are
|
||||||
|
// returned unchanged (plain .fcstd pass-through).
|
||||||
|
func Pack(original []byte, input *PackInput) ([]byte, error) {
|
||||||
|
r, err := zip.NewReader(bytes.NewReader(original), int64(len(original)))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: open zip: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Partition entries into silo/ vs non-silo.
|
||||||
|
hasSilo := false
|
||||||
|
for _, f := range r.File {
|
||||||
|
if f.Name == "silo/" || strings.HasPrefix(f.Name, "silo/") {
|
||||||
|
hasSilo = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !hasSilo {
|
||||||
|
return original, nil // plain .fcstd, no repacking needed
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
zw := zip.NewWriter(&buf)
|
||||||
|
|
||||||
|
// Copy all non-silo entries verbatim.
|
||||||
|
for _, f := range r.File {
|
||||||
|
if f.Name == "silo/" || strings.HasPrefix(f.Name, "silo/") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := copyZipEntry(zw, f); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: copying entry %s: %w", f.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write new silo/ entries from PackInput.
|
||||||
|
if input.Manifest != nil {
|
||||||
|
if err := writeJSONEntry(zw, "silo/manifest.json", input.Manifest); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: writing manifest.json: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if input.Metadata != nil {
|
||||||
|
if err := writeJSONEntry(zw, "silo/metadata.json", input.Metadata); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: writing metadata.json: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if input.History != nil {
|
||||||
|
if err := writeJSONEntry(zw, "silo/history.json", input.History); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: writing history.json: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if input.Dependencies != nil {
|
||||||
|
if err := writeJSONEntry(zw, "silo/dependencies.json", input.Dependencies); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: writing dependencies.json: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := zw.Close(); err != nil {
|
||||||
|
return nil, fmt.Errorf("kc: closing zip writer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyZipEntry copies a single entry from the original ZIP to the new writer,
|
||||||
|
// preserving the file header (compression method, timestamps, etc.).
|
||||||
|
func copyZipEntry(zw *zip.Writer, f *zip.File) error {
|
||||||
|
header := f.FileHeader
|
||||||
|
w, err := zw.CreateHeader(&header)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
rc, err := f.Open()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rc.Close()
|
||||||
|
|
||||||
|
_, err = io.Copy(w, rc)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeJSONEntry writes a new silo/ entry as JSON with Deflate compression.
|
||||||
|
func writeJSONEntry(zw *zip.Writer, name string, v any) error {
|
||||||
|
data, err := json.MarshalIndent(v, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
header := &zip.FileHeader{
|
||||||
|
Name: name,
|
||||||
|
Method: zip.Deflate,
|
||||||
|
}
|
||||||
|
w, err := zw.CreateHeader(header)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = w.Write(data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
229
internal/kc/pack_test.go
Normal file
229
internal/kc/pack_test.go
Normal file
@@ -0,0 +1,229 @@
|
|||||||
|
package kc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHasSiloDir_PlainFCStd(t *testing.T) {
|
||||||
|
data := buildZip(t, map[string][]byte{
|
||||||
|
"Document.xml": []byte("<xml/>"),
|
||||||
|
})
|
||||||
|
has, err := HasSiloDir(data)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if has {
|
||||||
|
t.Fatal("expected false for plain .fcstd")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasSiloDir_KC(t *testing.T) {
|
||||||
|
data := buildZip(t, map[string][]byte{
|
||||||
|
"Document.xml": []byte("<xml/>"),
|
||||||
|
"silo/manifest.json": []byte("{}"),
|
||||||
|
})
|
||||||
|
has, err := HasSiloDir(data)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if !has {
|
||||||
|
t.Fatal("expected true for .kc with silo/ dir")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasSiloDir_NotAZip(t *testing.T) {
|
||||||
|
_, err := HasSiloDir([]byte("not a zip"))
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("expected error for non-ZIP data")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPack_PlainFCStd_Passthrough(t *testing.T) {
|
||||||
|
original := buildZip(t, map[string][]byte{
|
||||||
|
"Document.xml": []byte("<xml/>"),
|
||||||
|
"thumbnails/a.png": []byte("png-data"),
|
||||||
|
})
|
||||||
|
|
||||||
|
result, err := Pack(original, &PackInput{
|
||||||
|
Manifest: &Manifest{UUID: "test"},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(result, original) {
|
||||||
|
t.Fatal("expected original bytes returned unchanged for plain .fcstd")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPack_RoundTrip verifies the full repack cycle: a .kc with stale silo/
// entries is repacked with fresh data, the new silo/ contents are readable
// via Extract, and all non-silo entries survive byte-identical.
func TestPack_RoundTrip(t *testing.T) {
	// Build a .kc with old silo/ data
	oldManifest := Manifest{UUID: "old-uuid", KCVersion: "0.9", RevisionHash: "old-hash"}
	oldMetadata := Metadata{SchemaName: "old-schema", Tags: []string{"old"}, LifecycleState: "draft"}

	original := buildZip(t, map[string][]byte{
		"Document.xml":       []byte("<freecad/>"),
		"thumbnails/t.png":   []byte("thumb-data"),
		"silo/manifest.json": mustJSON(t, oldManifest),
		"silo/metadata.json": mustJSON(t, oldMetadata),
	})

	// Pack with new data
	newManifest := &Manifest{UUID: "new-uuid", KCVersion: "1.0", RevisionHash: "new-hash", SiloInstance: "https://silo.test"}
	newMetadata := &Metadata{SchemaName: "mechanical-part-v2", Tags: []string{"aluminum", "structural"}, LifecycleState: "review", Fields: map[string]any{"material": "7075-T6"}}
	comment := "initial commit"
	history := []HistoryEntry{
		{RevisionNumber: 1, CreatedAt: "2026-01-01T00:00:00Z", Comment: &comment, Status: "draft", Labels: []string{}},
	}

	packed, err := Pack(original, &PackInput{
		Manifest:     newManifest,
		Metadata:     newMetadata,
		History:      history,
		Dependencies: []any{},
	})
	if err != nil {
		t.Fatalf("Pack error: %v", err)
	}

	// Extract and verify new silo/ data
	result, err := Extract(packed)
	if err != nil {
		t.Fatalf("Extract error: %v", err)
	}
	if result == nil {
		t.Fatal("expected non-nil extract result")
	}
	// Old values must be fully replaced, not merged.
	if result.Manifest.UUID != "new-uuid" {
		t.Errorf("manifest UUID = %q, want %q", result.Manifest.UUID, "new-uuid")
	}
	if result.Manifest.KCVersion != "1.0" {
		t.Errorf("manifest KCVersion = %q, want %q", result.Manifest.KCVersion, "1.0")
	}
	if result.Manifest.SiloInstance != "https://silo.test" {
		t.Errorf("manifest SiloInstance = %q, want %q", result.Manifest.SiloInstance, "https://silo.test")
	}
	if result.Metadata.SchemaName != "mechanical-part-v2" {
		t.Errorf("metadata SchemaName = %q, want %q", result.Metadata.SchemaName, "mechanical-part-v2")
	}
	if result.Metadata.LifecycleState != "review" {
		t.Errorf("metadata LifecycleState = %q, want %q", result.Metadata.LifecycleState, "review")
	}
	if len(result.Metadata.Tags) != 2 {
		t.Errorf("metadata Tags len = %d, want 2", len(result.Metadata.Tags))
	}
	if result.Metadata.Fields["material"] != "7075-T6" {
		t.Errorf("metadata Fields[material] = %v, want 7075-T6", result.Metadata.Fields["material"])
	}

	// Verify non-silo entries are preserved
	r, err := zip.NewReader(bytes.NewReader(packed), int64(len(packed)))
	if err != nil {
		t.Fatalf("opening packed ZIP: %v", err)
	}
	entryMap := make(map[string]bool)
	for _, f := range r.File {
		entryMap[f.Name] = true
	}
	if !entryMap["Document.xml"] {
		t.Error("Document.xml missing from packed ZIP")
	}
	if !entryMap["thumbnails/t.png"] {
		t.Error("thumbnails/t.png missing from packed ZIP")
	}

	// Verify non-silo content is byte-identical
	for _, f := range r.File {
		if f.Name == "Document.xml" {
			content := readZipEntry(t, f)
			if string(content) != "<freecad/>" {
				t.Errorf("Document.xml content = %q, want %q", content, "<freecad/>")
			}
		}
		if f.Name == "thumbnails/t.png" {
			content := readZipEntry(t, f)
			if string(content) != "thumb-data" {
				t.Errorf("thumbnails/t.png content = %q, want %q", content, "thumb-data")
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestPack_NilFields verifies that nil PackInput fields cause their silo/
// entries to be omitted entirely — and, crucially, that stale entries from
// the original archive do not leak through in their place.
func TestPack_NilFields(t *testing.T) {
	original := buildZip(t, map[string][]byte{
		"Document.xml":       []byte("<xml/>"),
		"silo/manifest.json": []byte(`{"uuid":"x"}`),
	})

	// Pack with only manifest, nil metadata/history/deps
	packed, err := Pack(original, &PackInput{
		Manifest: &Manifest{UUID: "updated"},
	})
	if err != nil {
		t.Fatalf("Pack error: %v", err)
	}

	// Extract — should have manifest but no metadata
	result, err := Extract(packed)
	if err != nil {
		t.Fatalf("Extract error: %v", err)
	}
	if result.Manifest == nil || result.Manifest.UUID != "updated" {
		t.Errorf("manifest UUID = %v, want updated", result.Manifest)
	}
	if result.Metadata != nil {
		t.Errorf("expected nil metadata, got %+v", result.Metadata)
	}

	// Verify no old silo/ entries leaked through
	r, _ := zip.NewReader(bytes.NewReader(packed), int64(len(packed)))
	for _, f := range r.File {
		if f.Name == "silo/metadata.json" {
			t.Error("old silo/metadata.json should have been removed")
		}
	}
}
|
||||||
|
|
||||||
|
func TestPack_EmptyDependencies(t *testing.T) {
|
||||||
|
original := buildZip(t, map[string][]byte{
|
||||||
|
"silo/manifest.json": []byte(`{"uuid":"x"}`),
|
||||||
|
})
|
||||||
|
|
||||||
|
packed, err := Pack(original, &PackInput{
|
||||||
|
Manifest: &Manifest{UUID: "x"},
|
||||||
|
Dependencies: []any{},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Pack error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify dependencies.json exists and is []
|
||||||
|
r, _ := zip.NewReader(bytes.NewReader(packed), int64(len(packed)))
|
||||||
|
for _, f := range r.File {
|
||||||
|
if f.Name == "silo/dependencies.json" {
|
||||||
|
content := readZipEntry(t, f)
|
||||||
|
if string(content) != "[]" {
|
||||||
|
t.Errorf("dependencies.json = %q, want %q", content, "[]")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Error("silo/dependencies.json not found in packed ZIP")
|
||||||
|
}
|
||||||
|
|
||||||
|
// readZipEntry reads the full contents of a zip.File.
|
||||||
|
func readZipEntry(t *testing.T, f *zip.File) []byte {
|
||||||
|
t.Helper()
|
||||||
|
rc, err := f.Open()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("opening zip entry %s: %v", f.Name, err)
|
||||||
|
}
|
||||||
|
defer rc.Close()
|
||||||
|
data, err := io.ReadAll(rc)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("reading zip entry %s: %v", f.Name, err)
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user