Compare commits
1 Commits
fix-web-st
...
test-cover
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
414a5cf3d6 |
3
Makefile
3
Makefile
@@ -11,7 +11,6 @@
|
||||
build: web-build
|
||||
go build -o silo ./cmd/silo
|
||||
go build -o silod ./cmd/silod
|
||||
go build -o silorunner ./cmd/silorunner
|
||||
|
||||
# Run the API server locally
|
||||
run:
|
||||
@@ -31,7 +30,7 @@ test-integration:
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
rm -f silo silod silorunner
|
||||
rm -f silo silod
|
||||
rm -f *.out
|
||||
rm -rf web/dist
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -14,12 +13,10 @@ import (
|
||||
|
||||
"github.com/alexedwards/scs/pgxstore"
|
||||
"github.com/alexedwards/scs/v2"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/api"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/storage"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -181,44 +178,6 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
// Load job definitions (optional — directory may not exist yet)
|
||||
var jobDefs map[string]*jobdef.Definition
|
||||
if _, err := os.Stat(cfg.Jobs.Directory); err == nil {
|
||||
jobDefs, err = jobdef.LoadAll(cfg.Jobs.Directory)
|
||||
if err != nil {
|
||||
logger.Fatal().Err(err).Str("directory", cfg.Jobs.Directory).Msg("failed to load job definitions")
|
||||
}
|
||||
logger.Info().Int("count", len(jobDefs)).Msg("loaded job definitions")
|
||||
} else {
|
||||
jobDefs = make(map[string]*jobdef.Definition)
|
||||
logger.Info().Str("directory", cfg.Jobs.Directory).Msg("job definitions directory not found, skipping")
|
||||
}
|
||||
|
||||
// Upsert job definitions into database
|
||||
jobRepo := db.NewJobRepository(database)
|
||||
for _, def := range jobDefs {
|
||||
defJSON, _ := json.Marshal(def)
|
||||
var defMap map[string]any
|
||||
json.Unmarshal(defJSON, &defMap)
|
||||
|
||||
rec := &db.JobDefinitionRecord{
|
||||
Name: def.Name,
|
||||
Version: def.Version,
|
||||
TriggerType: def.Trigger.Type,
|
||||
ScopeType: def.Scope.Type,
|
||||
ComputeType: def.Compute.Type,
|
||||
RunnerTags: def.Runner.Tags,
|
||||
TimeoutSeconds: def.Timeout,
|
||||
MaxRetries: def.MaxRetries,
|
||||
Priority: def.Priority,
|
||||
Definition: defMap,
|
||||
Enabled: true,
|
||||
}
|
||||
if err := jobRepo.UpsertDefinition(ctx, rec); err != nil {
|
||||
logger.Fatal().Err(err).Str("name", def.Name).Msg("failed to upsert job definition")
|
||||
}
|
||||
}
|
||||
|
||||
// Create SSE broker and server state
|
||||
broker := api.NewBroker(logger)
|
||||
serverState := api.NewServerState(logger, store, broker)
|
||||
@@ -231,29 +190,9 @@ func main() {
|
||||
|
||||
// Create API server
|
||||
server := api.NewServer(logger, database, schemas, cfg.Schemas.Directory, store,
|
||||
authService, sessionManager, oidcBackend, &cfg.Auth, broker, serverState,
|
||||
jobDefs, cfg.Jobs.Directory)
|
||||
authService, sessionManager, oidcBackend, &cfg.Auth, broker, serverState)
|
||||
router := api.NewRouter(server, logger)
|
||||
|
||||
// Start background sweepers for job/runner timeouts
|
||||
go func() {
|
||||
ticker := time.NewTicker(time.Duration(cfg.Jobs.JobTimeoutCheck) * time.Second)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
if n, err := jobRepo.TimeoutExpiredJobs(ctx); err != nil {
|
||||
logger.Error().Err(err).Msg("job timeout sweep failed")
|
||||
} else if n > 0 {
|
||||
logger.Info().Int64("count", n).Msg("timed out expired jobs")
|
||||
}
|
||||
|
||||
if n, err := jobRepo.ExpireStaleRunners(ctx, time.Duration(cfg.Jobs.RunnerTimeout)*time.Second); err != nil {
|
||||
logger.Error().Err(err).Msg("runner expiry sweep failed")
|
||||
} else if n > 0 {
|
||||
logger.Info().Int64("count", n).Msg("expired stale runners")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Create HTTP server
|
||||
addr := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
|
||||
httpServer := &http.Server{
|
||||
|
||||
@@ -1,330 +0,0 @@
|
||||
// Command silorunner is a compute worker that polls the Silo server for jobs
|
||||
// and executes them using Headless Create with silo-mod installed.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// RunnerConfig holds runner configuration.
|
||||
type RunnerConfig struct {
|
||||
ServerURL string `yaml:"server_url"`
|
||||
Token string `yaml:"token"`
|
||||
Name string `yaml:"name"`
|
||||
Tags []string `yaml:"tags"`
|
||||
PollInterval int `yaml:"poll_interval"` // seconds, default 5
|
||||
CreatePath string `yaml:"create_path"` // path to Headless Create binary
|
||||
}
|
||||
|
||||
func main() {
|
||||
configPath := flag.String("config", "runner.yaml", "Path to runner config file")
|
||||
flag.Parse()
|
||||
|
||||
logger := zerolog.New(os.Stdout).With().Timestamp().Str("component", "silorunner").Logger()
|
||||
|
||||
// Load config
|
||||
cfg, err := loadConfig(*configPath)
|
||||
if err != nil {
|
||||
logger.Fatal().Err(err).Msg("failed to load config")
|
||||
}
|
||||
|
||||
if cfg.ServerURL == "" {
|
||||
logger.Fatal().Msg("server_url is required")
|
||||
}
|
||||
if cfg.Token == "" {
|
||||
logger.Fatal().Msg("token is required")
|
||||
}
|
||||
if cfg.Name == "" {
|
||||
hostname, _ := os.Hostname()
|
||||
cfg.Name = "runner-" + hostname
|
||||
}
|
||||
if cfg.PollInterval <= 0 {
|
||||
cfg.PollInterval = 5
|
||||
}
|
||||
|
||||
logger.Info().
|
||||
Str("server", cfg.ServerURL).
|
||||
Str("name", cfg.Name).
|
||||
Strs("tags", cfg.Tags).
|
||||
Int("poll_interval", cfg.PollInterval).
|
||||
Msg("starting runner")
|
||||
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
// Graceful shutdown
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Heartbeat goroutine
|
||||
go func() {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := heartbeat(client, cfg); err != nil {
|
||||
logger.Error().Err(err).Msg("heartbeat failed")
|
||||
}
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Initial heartbeat
|
||||
if err := heartbeat(client, cfg); err != nil {
|
||||
logger.Warn().Err(err).Msg("initial heartbeat failed")
|
||||
}
|
||||
|
||||
// Poll loop
|
||||
ticker := time.NewTicker(time.Duration(cfg.PollInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
job, definition, err := claimJob(client, cfg)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("claim failed")
|
||||
continue
|
||||
}
|
||||
if job == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
jobID, _ := job["id"].(string)
|
||||
defName, _ := job["definition_name"].(string)
|
||||
logger.Info().Str("job_id", jobID).Str("definition", defName).Msg("claimed job")
|
||||
|
||||
// Start the job
|
||||
if err := startJob(client, cfg, jobID); err != nil {
|
||||
logger.Error().Err(err).Str("job_id", jobID).Msg("failed to start job")
|
||||
continue
|
||||
}
|
||||
|
||||
// Execute the job
|
||||
executeJob(logger, client, cfg, jobID, job, definition)
|
||||
|
||||
case <-quit:
|
||||
logger.Info().Msg("shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*RunnerConfig, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
data = []byte(os.ExpandEnv(string(data)))
|
||||
|
||||
var cfg RunnerConfig
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parsing config: %w", err)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func heartbeat(client *http.Client, cfg *RunnerConfig) error {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/heartbeat", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("heartbeat: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func claimJob(client *http.Client, cfg *RunnerConfig) (map[string]any, map[string]any, error) {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/claim", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return nil, nil, nil // No jobs available
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, nil, fmt.Errorf("claim: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Job map[string]any `json:"job"`
|
||||
Definition map[string]any `json:"definition"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding claim response: %w", err)
|
||||
}
|
||||
|
||||
return result.Job, result.Definition, nil
|
||||
}
|
||||
|
||||
func startJob(client *http.Client, cfg *RunnerConfig, jobID string) error {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/start", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("start: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func reportProgress(client *http.Client, cfg *RunnerConfig, jobID string, progress int, message string) {
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"progress": progress,
|
||||
"message": message,
|
||||
})
|
||||
req, _ := http.NewRequest("PUT", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/progress", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
func completeJob(client *http.Client, cfg *RunnerConfig, jobID string, result map[string]any) error {
|
||||
body, _ := json.Marshal(map[string]any{"result": result})
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/complete", bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
respBody, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("complete: %d %s", resp.StatusCode, string(respBody))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func failJob(client *http.Client, cfg *RunnerConfig, jobID string, errMsg string) {
|
||||
body, _ := json.Marshal(map[string]string{"error": errMsg})
|
||||
req, _ := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/fail", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
func appendLog(client *http.Client, cfg *RunnerConfig, jobID, level, message string) {
|
||||
body, _ := json.Marshal(map[string]string{
|
||||
"level": level,
|
||||
"message": message,
|
||||
})
|
||||
req, _ := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/log", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
// executeJob dispatches the job based on its compute command.
|
||||
// For now, this is a stub that demonstrates the lifecycle.
|
||||
// Real execution will shell out to Headless Create with silo-mod.
|
||||
func executeJob(logger zerolog.Logger, client *http.Client, cfg *RunnerConfig, jobID string, job, definition map[string]any) {
|
||||
defName, _ := job["definition_name"].(string)
|
||||
|
||||
// Extract compute config from definition
|
||||
var command string
|
||||
if definition != nil {
|
||||
if compute, ok := definition["compute"].(map[string]any); ok {
|
||||
command, _ = compute["command"].(string)
|
||||
}
|
||||
}
|
||||
|
||||
appendLog(client, cfg, jobID, "info", fmt.Sprintf("starting execution: %s (command: %s)", defName, command))
|
||||
reportProgress(client, cfg, jobID, 10, "preparing")
|
||||
|
||||
switch command {
|
||||
case "create-validate", "create-export", "create-dag-extract", "create-thumbnail":
|
||||
if cfg.CreatePath == "" {
|
||||
failJob(client, cfg, jobID, "create_path not configured")
|
||||
return
|
||||
}
|
||||
|
||||
appendLog(client, cfg, jobID, "info", fmt.Sprintf("would execute: %s --console with silo-mod", cfg.CreatePath))
|
||||
reportProgress(client, cfg, jobID, 50, "executing")
|
||||
|
||||
// TODO: Actual Create execution:
|
||||
// 1. Download item file from Silo API
|
||||
// 2. Shell out: create --console -e "from silo.runner import <entry>; <entry>(...)"
|
||||
// 3. Parse output JSON
|
||||
// 4. Upload results / sync DAG
|
||||
// For now, complete with a placeholder result.
|
||||
|
||||
reportProgress(client, cfg, jobID, 90, "finalizing")
|
||||
|
||||
if err := completeJob(client, cfg, jobID, map[string]any{
|
||||
"status": "placeholder",
|
||||
"message": "Create execution not yet implemented - runner lifecycle verified",
|
||||
"command": command,
|
||||
}); err != nil {
|
||||
logger.Error().Err(err).Str("job_id", jobID).Msg("failed to complete job")
|
||||
} else {
|
||||
logger.Info().Str("job_id", jobID).Msg("job completed (placeholder)")
|
||||
}
|
||||
|
||||
default:
|
||||
failJob(client, cfg, jobID, fmt.Sprintf("unknown compute command: %s", command))
|
||||
logger.Warn().Str("job_id", jobID).Str("command", command).Msg("unknown compute command")
|
||||
}
|
||||
}
|
||||
246
docs/DAG.md
246
docs/DAG.md
@@ -1,246 +0,0 @@
|
||||
# Dependency DAG Specification
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
The Dependency DAG is a server-side graph that tracks how features, constraints, and assembly relationships depend on each other. It enables three capabilities described in [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md):
|
||||
|
||||
1. **Interference detection** -- comparing dependency cones of concurrent edit sessions to classify conflicts as none, soft, or hard before the user encounters them.
|
||||
2. **Incremental validation** -- marking changed nodes dirty and propagating only through the affected subgraph, using input-hash memoization to stop early when inputs haven't changed.
|
||||
3. **Structured merge safety** -- walking the DAG to determine whether concurrent edits share upstream dependencies, deciding if auto-merge is safe or manual review is required.
|
||||
|
||||
---
|
||||
|
||||
## 2. Two-Tier Model
|
||||
|
||||
Silo maintains two levels of dependency graph:
|
||||
|
||||
### 2.1 BOM DAG (existing)
|
||||
|
||||
The assembly-to-part relationship graph already stored in the `relationships` table. Each row represents a parent item containing a child item with a quantity and relationship type (`component`, `alternate`, `reference`). This graph is queried via `GetBOM`, `GetExpandedBOM`, `GetWhereUsed`, and `HasCycle` in `internal/db/relationships.go`.
|
||||
|
||||
The BOM DAG is **not modified** by this specification. It continues to serve its existing purpose.
|
||||
|
||||
### 2.2 Feature DAG (new)
|
||||
|
||||
A finer-grained graph stored in `dag_nodes` and `dag_edges` tables. Each node represents a feature within a single item's revision -- a sketch, pad, fillet, pocket, constraint, body, or part-level container. Edges represent "depends on" relationships: if Pad003 depends on Sketch001, an edge runs from Sketch001 to Pad003.
|
||||
|
||||
The feature DAG is populated by clients (silo-mod) when users save, or by runners after compute jobs. Silo stores and queries it but does not generate it -- the Create client has access to the feature tree and is the authoritative source.
|
||||
|
||||
### 2.3 Cross-Item Edges
|
||||
|
||||
Assembly constraints often reference geometry on child parts (e.g., "mate Face6 of PartA to Face2 of PartB"). These cross-item dependencies are stored in `dag_cross_edges`, linking a node in one item to a node in another. Each cross-edge optionally references the `relationships` row that establishes the BOM connection.
|
||||
|
||||
---
|
||||
|
||||
## 3. Data Model
|
||||
|
||||
### 3.1 dag_nodes
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `item_id` | UUID | FK to `items.id` |
|
||||
| `revision_number` | INTEGER | Revision this DAG snapshot belongs to |
|
||||
| `node_key` | TEXT | Feature name from Create (e.g., `Sketch001`, `Pad003`, `Body`) |
|
||||
| `node_type` | TEXT | One of: `sketch`, `pad`, `pocket`, `fillet`, `chamfer`, `constraint`, `body`, `part`, `datum`, `mirror`, `pattern`, `boolean` |
|
||||
| `properties_hash` | TEXT | SHA-256 of the node's parametric inputs (sketch coordinates, fillet radius, constraint values). Used for memoization -- if the hash hasn't changed, validation can skip this node. |
|
||||
| `validation_state` | TEXT | One of: `clean`, `dirty`, `validating`, `failed` |
|
||||
| `validation_msg` | TEXT | Error message when `validation_state = 'failed'` |
|
||||
| `metadata` | JSONB | Type-specific data (sketch coords, feature params, constraint definitions) |
|
||||
| `created_at` | TIMESTAMPTZ | Row creation time |
|
||||
| `updated_at` | TIMESTAMPTZ | Last state change |
|
||||
|
||||
**Uniqueness:** `(item_id, revision_number, node_key)` -- one node per feature per revision.
|
||||
|
||||
### 3.2 dag_edges
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `source_node_id` | UUID | FK to `dag_nodes.id` -- the upstream node |
|
||||
| `target_node_id` | UUID | FK to `dag_nodes.id` -- the downstream node that depends on source |
|
||||
| `edge_type` | TEXT | `depends_on` (default), `references`, `constrains` |
|
||||
| `metadata` | JSONB | Optional edge metadata |
|
||||
|
||||
**Direction convention:** An edge from A to B means "B depends on A". A is upstream, B is downstream. Forward-cone traversal from A walks edges where A is the source.
|
||||
|
||||
**Uniqueness:** `(source_node_id, target_node_id, edge_type)`.
|
||||
|
||||
**Constraint:** `source_node_id != target_node_id` (no self-edges).
|
||||
|
||||
### 3.3 dag_cross_edges
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `source_node_id` | UUID | FK to `dag_nodes.id` -- node in item A |
|
||||
| `target_node_id` | UUID | FK to `dag_nodes.id` -- node in item B |
|
||||
| `relationship_id` | UUID | FK to `relationships.id` (nullable) -- the BOM entry connecting the two items |
|
||||
| `edge_type` | TEXT | `assembly_ref` (default) |
|
||||
| `metadata` | JSONB | Reference details (face ID, edge ID, etc.) |
|
||||
|
||||
**Uniqueness:** `(source_node_id, target_node_id)`.
|
||||
|
||||
---
|
||||
|
||||
## 4. Validation States
|
||||
|
||||
Each node has a `validation_state` that tracks whether its computed geometry is current:
|
||||
|
||||
| State | Meaning |
|
||||
|-------|---------|
|
||||
| `clean` | Node's geometry matches its `properties_hash`. No recompute needed. |
|
||||
| `dirty` | An upstream change has propagated to this node. Recompute required. |
|
||||
| `validating` | A compute job is currently revalidating this node. |
|
||||
| `failed` | Recompute failed. `validation_msg` contains the error. |
|
||||
|
||||
### 4.1 State Transitions
|
||||
|
||||
```
|
||||
clean → dirty (upstream change detected, or MarkDirty called)
|
||||
dirty → validating (compute job claims this node)
|
||||
validating → clean (recompute succeeded, properties_hash updated)
|
||||
validating → failed (recompute produced an error)
|
||||
failed → dirty (upstream change detected, retry possible)
|
||||
dirty → clean (properties_hash matches previous -- memoization shortcut)
|
||||
```
|
||||
|
||||
### 4.2 Dirty Propagation
|
||||
|
||||
When a node is marked dirty, all downstream nodes in its forward cone are also marked dirty. This is done atomically in a single recursive CTE:
|
||||
|
||||
```sql
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT $1::uuid AS node_id
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
UPDATE dag_nodes SET validation_state = 'dirty', updated_at = now()
|
||||
WHERE id IN (SELECT node_id FROM forward_cone)
|
||||
AND validation_state = 'clean';
|
||||
```
|
||||
|
||||
### 4.3 Memoization
|
||||
|
||||
Before marking a node dirty, the system can compare the new `properties_hash` against the stored value. If they match, the change did not affect this node's inputs, and propagation stops. This is the memoization boundary described in MULTI_USER_EDITS.md Section 5.2.
|
||||
|
||||
---
|
||||
|
||||
## 5. Graph Queries
|
||||
|
||||
### 5.1 Forward Cone
|
||||
|
||||
Returns all nodes downstream of a given node -- everything that would be affected if the source node changes. Used for interference detection: if two users' forward cones overlap, there is potential interference.
|
||||
|
||||
```sql
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT target_node_id AS node_id
|
||||
FROM dag_edges WHERE source_node_id = $1
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
SELECT n.* FROM dag_nodes n JOIN forward_cone fc ON n.id = fc.node_id;
|
||||
```
|
||||
|
||||
### 5.2 Backward Cone
|
||||
|
||||
Returns all nodes upstream of a given node -- everything the target node depends on.
|
||||
|
||||
### 5.3 Dirty Subgraph
|
||||
|
||||
Returns all nodes for a given item where `validation_state != 'clean'`, along with their edges. This is the input to an incremental validation job.
|
||||
|
||||
### 5.4 Cycle Detection
|
||||
|
||||
Before adding an edge, check that it would not create a cycle. Uses the same recursive ancestor-walk pattern as `HasCycle` in `internal/db/relationships.go`.
|
||||
|
||||
---
|
||||
|
||||
## 6. DAG Sync
|
||||
|
||||
Clients push the full feature DAG to Silo via `PUT /api/items/{partNumber}/dag`. The sync payload is a JSON document:
|
||||
|
||||
```json
|
||||
{
|
||||
"revision": 3,
|
||||
"nodes": [
|
||||
{
|
||||
"key": "Sketch001",
|
||||
"type": "sketch",
|
||||
"properties_hash": "a1b2c3...",
|
||||
"metadata": {
|
||||
"coordinates": [[0, 0], [10, 0], [10, 5]],
|
||||
"constraints": ["horizontal", "vertical"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "Pad003",
|
||||
"type": "pad",
|
||||
"properties_hash": "d4e5f6...",
|
||||
"metadata": {
|
||||
"length": 15.0,
|
||||
"direction": [0, 0, 1]
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"source": "Sketch001",
|
||||
"target": "Pad003",
|
||||
"type": "depends_on"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The server processes this within a single transaction:
|
||||
1. Upsert all nodes (matched by `item_id + revision_number + node_key`).
|
||||
2. Replace all edges for this item/revision.
|
||||
3. Compare new `properties_hash` values against stored values to detect changes.
|
||||
4. Mark changed nodes and their forward cones dirty.
|
||||
5. Publish `dag.updated` SSE event.
|
||||
|
||||
---
|
||||
|
||||
## 7. Interference Detection
|
||||
|
||||
When a user registers an edit context (MULTI_USER_EDITS.md Section 3.1), the server:
|
||||
|
||||
1. Looks up the node(s) being edited by `node_key` within the item's current revision.
|
||||
2. Computes the forward cone for those nodes.
|
||||
3. Compares the cone against all active edit sessions' cones.
|
||||
4. Classifies interference:
|
||||
- **No overlap** → no interference, fully concurrent.
|
||||
- **Overlap, different objects** → soft interference, visual indicator via SSE.
|
||||
- **Same object, same edit type** → hard interference, edit blocked.
|
||||
|
||||
---
|
||||
|
||||
## 8. REST API
|
||||
|
||||
All endpoints are under `/api/items/{partNumber}` and require authentication.
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/dag` | viewer | Get full feature DAG for current revision |
|
||||
| `GET` | `/dag/forward-cone/{nodeKey}` | viewer | Get forward dependency cone |
|
||||
| `GET` | `/dag/dirty` | viewer | Get dirty subgraph |
|
||||
| `PUT` | `/dag` | editor | Sync full feature tree (from client or runner) |
|
||||
| `POST` | `/dag/mark-dirty/{nodeKey}` | editor | Manually mark a node and its cone dirty |
|
||||
|
||||
---
|
||||
|
||||
## 9. References
|
||||
|
||||
- [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md) -- Full multi-user editing specification
|
||||
- [WORKERS.md](WORKERS.md) -- Worker/runner system that executes validation jobs
|
||||
- [ROADMAP.md](ROADMAP.md) -- Tier 0 Dependency DAG entry
|
||||
@@ -1,395 +0,0 @@
|
||||
# DAG Client Integration Contract
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
This document describes what silo-mod and Headless Create runners need to implement to integrate with the Silo dependency DAG and worker system.
|
||||
|
||||
---
|
||||
|
||||
## 1. Overview
|
||||
|
||||
The DAG system has two client-side integration points:
|
||||
|
||||
1. **silo-mod workbench** (desktop) -- pushes DAG data to Silo on save or revision create.
|
||||
2. **silorunner + silo-mod** (headless) -- extracts DAGs, validates features, and exports geometry as compute jobs.
|
||||
|
||||
Both share the same Python codebase in the silo-mod repository. Desktop users call the code interactively; runners call it headlessly via `create --console`.
|
||||
|
||||
---
|
||||
|
||||
## 2. DAG Sync Payload
|
||||
|
||||
Clients push feature trees to Silo via:
|
||||
|
||||
```
|
||||
PUT /api/items/{partNumber}/dag
|
||||
Authorization: Bearer <user_token or runner_token>
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
### 2.1 Request Body
|
||||
|
||||
```json
|
||||
{
|
||||
"revision_number": 3,
|
||||
"nodes": [
|
||||
{
|
||||
"node_key": "Sketch001",
|
||||
"node_type": "sketch",
|
||||
"properties_hash": "a1b2c3d4e5f6...",
|
||||
"metadata": {
|
||||
"label": "Base Profile",
|
||||
"constraint_count": 12
|
||||
}
|
||||
},
|
||||
{
|
||||
"node_key": "Pad001",
|
||||
"node_type": "pad",
|
||||
"properties_hash": "f6e5d4c3b2a1...",
|
||||
"metadata": {
|
||||
"label": "Main Extrusion",
|
||||
"length": 25.0
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"source_key": "Sketch001",
|
||||
"target_key": "Pad001",
|
||||
"edge_type": "depends_on"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 2.2 Field Reference
|
||||
|
||||
**Nodes:**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `node_key` | string | yes | Unique within item+revision. Use Create's internal object name (e.g. `Sketch001`, `Pad003`). |
|
||||
| `node_type` | string | yes | One of: `sketch`, `pad`, `pocket`, `fillet`, `chamfer`, `constraint`, `body`, `part`, `datum`. |
|
||||
| `properties_hash` | string | no | SHA-256 hex digest of the node's parametric inputs. Used for memoization. |
|
||||
| `validation_state` | string | no | One of: `clean`, `dirty`, `validating`, `failed`. Defaults to `clean`. |
|
||||
| `metadata` | object | no | Arbitrary key-value pairs for display or debugging. |
|
||||
|
||||
**Edges:**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `source_key` | string | yes | The node that is depended upon. |
|
||||
| `target_key` | string | yes | The node that depends on the source. |
|
||||
| `edge_type` | string | no | One of: `depends_on` (default), `references`, `constrains`. |
|
||||
|
||||
**Direction convention:** Edges point from dependency to dependent. If Pad001 depends on Sketch001, the edge is `source_key: "Sketch001"`, `target_key: "Pad001"`.
|
||||
|
||||
### 2.3 Response
|
||||
|
||||
```json
|
||||
{
|
||||
"synced": true,
|
||||
"node_count": 15,
|
||||
"edge_count": 14
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Computing properties_hash
|
||||
|
||||
The `properties_hash` enables memoization -- if a node's inputs haven't changed since the last validation, it can be skipped. Computing it:
|
||||
|
||||
```python
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
def compute_properties_hash(feature_obj):
|
||||
"""Hash the parametric inputs of a Create feature."""
|
||||
inputs = {}
|
||||
|
||||
if feature_obj.TypeId == "Sketcher::SketchObject":
|
||||
# Hash geometry + constraints
|
||||
inputs["geometry_count"] = feature_obj.GeometryCount
|
||||
inputs["constraint_count"] = feature_obj.ConstraintCount
|
||||
inputs["geometry"] = str(feature_obj.Shape.exportBrep())
|
||||
elif feature_obj.TypeId == "PartDesign::Pad":
|
||||
inputs["length"] = feature_obj.Length.Value
|
||||
inputs["type"] = str(feature_obj.Type)
|
||||
inputs["reversed"] = feature_obj.Reversed
|
||||
inputs["sketch"] = feature_obj.Profile[0].Name
|
||||
# ... other feature types
|
||||
|
||||
canonical = json.dumps(inputs, sort_keys=True)
|
||||
return hashlib.sha256(canonical.encode()).hexdigest()
|
||||
```
|
||||
|
||||
The exact inputs per feature type are determined by what parametric values affect the feature's geometry. Include anything that, if changed, would require recomputation.
|
||||
|
||||
---
|
||||
|
||||
## 4. Feature Tree Walking
|
||||
|
||||
To extract the DAG from a Create document:
|
||||
|
||||
```python
|
||||
import FreeCAD
|
||||
|
||||
def extract_dag(doc):
|
||||
"""Walk a Create document and return nodes + edges."""
|
||||
nodes = []
|
||||
edges = []
|
||||
|
||||
for obj in doc.Objects:
|
||||
# Skip non-feature objects
|
||||
if not hasattr(obj, "TypeId"):
|
||||
continue
|
||||
|
||||
node_type = classify_type(obj.TypeId)
|
||||
if node_type is None:
|
||||
continue
|
||||
|
||||
nodes.append({
|
||||
"node_key": obj.Name,
|
||||
"node_type": node_type,
|
||||
"properties_hash": compute_properties_hash(obj),
|
||||
"metadata": {
|
||||
"label": obj.Label,
|
||||
"type_id": obj.TypeId,
|
||||
}
|
||||
})
|
||||
|
||||
# Walk dependencies via InList (objects this one depends on)
|
||||
for dep in obj.InList:
|
||||
if hasattr(dep, "TypeId") and classify_type(dep.TypeId):
|
||||
edges.append({
|
||||
"source_key": dep.Name,
|
||||
"target_key": obj.Name,
|
||||
"edge_type": "depends_on",
|
||||
})
|
||||
|
||||
return nodes, edges
|
||||
|
||||
|
||||
def classify_type(type_id):
    """Map Create TypeIds to DAG node types.

    Returns the DAG node type string for a known Create TypeId,
    or None when the TypeId does not correspond to a DAG node.
    """
    type_to_node = {
        "Sketcher::SketchObject": "sketch",
        "PartDesign::Pad": "pad",
        "PartDesign::Pocket": "pocket",
        "PartDesign::Fillet": "fillet",
        "PartDesign::Chamfer": "chamfer",
        "PartDesign::Body": "body",
        "Part::Feature": "part",
        "Sketcher::SketchConstraint": "constraint",
    }
    if type_id in type_to_node:
        return type_to_node[type_id]
    return None
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. When to Push DAG Data
|
||||
|
||||
Push the DAG to Silo in these scenarios:
|
||||
|
||||
| Event | Trigger | Who |
|
||||
|-------|---------|-----|
|
||||
| User saves in silo-mod | On save callback | Desktop silo-mod workbench |
|
||||
| User creates a revision | After `POST /api/items/{pn}/revisions` succeeds | Desktop silo-mod workbench |
|
||||
| Runner extracts DAG | After `create-dag-extract` job completes | silorunner via `PUT /api/runner/jobs/{id}/dag` |
|
||||
| Runner validates | After `create-validate` job, push updated validation states | silorunner via `PUT /api/runner/jobs/{id}/dag` |
|
||||
|
||||
---
|
||||
|
||||
## 6. Runner Entry Points
|
||||
|
||||
silo-mod must provide these Python entry points for headless invocation:
|
||||
|
||||
### 6.1 silo.runner.dag_extract
|
||||
|
||||
Extracts the feature DAG from a Create file and writes it as JSON.
|
||||
|
||||
```python
|
||||
# silo/runner.py
|
||||
|
||||
def dag_extract(input_path, output_path):
    """
    Extract feature DAG from a Create file.

    Args:
        input_path: Path to the .kc (Kindred Create) file.
        output_path: Path to write the JSON output.

    Output JSON format:
        {
          "nodes": [...],   // Same format as DAG sync payload
          "edges": [...]
        }
    """
    doc = FreeCAD.openDocument(input_path)
    # Close the document even if extraction or the file write fails;
    # otherwise a failed job leaves the document open in the headless
    # Create process for the lifetime of the runner.
    try:
        nodes, edges = extract_dag(doc)

        with open(output_path, 'w') as f:
            json.dump({"nodes": nodes, "edges": edges}, f)
    finally:
        FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
### 6.2 silo.runner.validate
|
||||
|
||||
Rebuilds all features and reports pass/fail per node.
|
||||
|
||||
```python
|
||||
def validate(input_path, output_path):
    """
    Validate a Create file by rebuilding all features.

    Args:
        input_path: Path to the .kc (Kindred Create) file.
        output_path: Path to write the JSON result.

    Output JSON format:
        {
          "valid": true/false,
          "nodes": [
            {
              "node_key": "Pad001",
              "state": "clean",        // or "failed"
              "message": null,         // error message if failed
              "properties_hash": "..."
            }
          ]
        }
    """
    doc = FreeCAD.openDocument(input_path)
    # Ensure the document is closed even when recompute or the result
    # write raises, so a failed validation job does not leak an open
    # document in the headless Create process.
    try:
        doc.recompute()

        results = []
        all_valid = True
        for obj in doc.Objects:
            if not hasattr(obj, "TypeId"):
                continue
            node_type = classify_type(obj.TypeId)
            if node_type is None:
                continue

            state = "clean"
            message = None
            # Features expose isValid() after recompute; False means the
            # rebuild failed for this feature.
            if hasattr(obj, "isValid") and not obj.isValid():
                state = "failed"
                message = f"Feature {obj.Label} failed to recompute"
                all_valid = False

            results.append({
                "node_key": obj.Name,
                "state": state,
                "message": message,
                "properties_hash": compute_properties_hash(obj),
            })

        with open(output_path, 'w') as f:
            json.dump({"valid": all_valid, "nodes": results}, f)
    finally:
        FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
### 6.3 silo.runner.export
|
||||
|
||||
Exports geometry to STEP, IGES, or other formats.
|
||||
|
||||
```python
|
||||
def export(input_path, output_path, format="step"):
    """
    Export a Create file to an external format.

    Args:
        input_path: Path to the .kc file.
        output_path: Path to write the exported file.
        format: Export format ("step", "iges", "stl", "obj").

    Raises:
        ValueError: If format is not one of the supported formats.

    Note:
        Part.export chooses the actual exporter from the extension of
        output_path, so output_path's extension must agree with format
        (e.g. a "step" export should target a .step/.stp path).
    """
    # Reject unknown formats up front instead of silently writing whatever
    # the output extension implies. (The previous revision built an unused
    # format lookup table and ignored this argument entirely.)
    supported_formats = {"step", "iges", "stl", "obj"}
    if format not in supported_formats:
        raise ValueError(f"unsupported export format: {format}")

    doc = FreeCAD.openDocument(input_path)
    # Close the document even if compounding or export fails.
    try:
        import Part
        shapes = [obj.Shape for obj in doc.Objects if hasattr(obj, "Shape")]
        compound = Part.makeCompound(shapes)

        Part.export([compound], output_path)
    finally:
        FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Headless Invocation
|
||||
|
||||
The `silorunner` binary shells out to Create (with silo-mod installed):
|
||||
|
||||
```bash
|
||||
# DAG extraction
|
||||
create --console -e "from silo.runner import dag_extract; dag_extract('/tmp/job/part.kc', '/tmp/job/dag.json')"
|
||||
|
||||
# Validation
|
||||
create --console -e "from silo.runner import validate; validate('/tmp/job/part.kc', '/tmp/job/result.json')"
|
||||
|
||||
# Export
|
||||
create --console -e "from silo.runner import export; export('/tmp/job/part.kc', '/tmp/job/output.step', 'step')"
|
||||
```
|
||||
|
||||
**Prerequisites:** The runner host must have:
|
||||
- Headless Create installed (Kindred's fork of FreeCAD)
|
||||
- silo-mod installed as a Create addon (so `from silo.runner import ...` works)
|
||||
- No display server required -- `--console` mode is headless
|
||||
|
||||
---
|
||||
|
||||
## 8. Validation Result Handling
|
||||
|
||||
After a runner completes a `create-validate` job, it should:
|
||||
|
||||
1. Read the result JSON.
|
||||
2. Push updated validation states via `PUT /api/runner/jobs/{jobID}/dag`:
|
||||
|
||||
```json
|
||||
{
|
||||
"revision_number": 3,
|
||||
"nodes": [
|
||||
{"node_key": "Sketch001", "node_type": "sketch", "validation_state": "clean", "properties_hash": "abc..."},
|
||||
{"node_key": "Pad001", "node_type": "pad", "validation_state": "failed", "properties_hash": "def..."}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "Sketch001", "target_key": "Pad001"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
3. Complete the job via `POST /api/runner/jobs/{jobID}/complete` with the summary result.
|
||||
|
||||
---
|
||||
|
||||
## 9. SSE Events
|
||||
|
||||
Clients should listen for these events on `GET /api/events`:
|
||||
|
||||
| Event | Payload | When |
|
||||
|-------|---------|------|
|
||||
| `dag.updated` | `{item_id, part_number, revision_number, node_count, edge_count}` | After any DAG sync |
|
||||
| `dag.validated` | `{item_id, part_number, valid, failed_count}` | After validation completes |
|
||||
| `job.created` | `{job_id, definition_name, trigger, item_id}` | Job auto-triggered or manually created |
|
||||
| `job.claimed` | `{job_id, runner_id, runner}` | Runner claims a job |
|
||||
| `job.progress` | `{job_id, progress, message}` | Runner reports progress |
|
||||
| `job.completed` | `{job_id, runner_id}` | Job finishes successfully |
|
||||
| `job.failed` | `{job_id, runner_id, error}` | Job fails |
|
||||
| `job.cancelled` | `{job_id, cancelled_by}` | Job cancelled by user |
|
||||
|
||||
---
|
||||
|
||||
## 10. Cross-Item Edges
|
||||
|
||||
For assembly constraints that reference geometry in child parts (e.g. a mate constraint between two parts), use the `dag_cross_edges` table. These edges bridge the BOM DAG and the feature DAG.
|
||||
|
||||
Cross-item edges are **not** included in the standard `PUT /dag` sync. They will be managed through a dedicated endpoint in a future iteration once the assembly constraint model in Create/silo-mod is finalized.
|
||||
|
||||
For now, the DAG sync covers intra-item dependencies only. Assembly-level interference detection uses the BOM DAG (`relationships` table) combined with per-item feature DAGs.
|
||||
@@ -170,11 +170,11 @@ Complete MVP and stabilize core functionality.
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Unit test suite | Core API, database, partnum, file, CSV/ODS handler tests | Partial (~40%) |
|
||||
| Date segment type | Implement `date` segment with strftime-style formatting | Not Started |
|
||||
| Part number validation | Validate format against schema on creation | Not Started |
|
||||
| Location CRUD API | Expose location hierarchy via REST | Not Started |
|
||||
| Inventory API | Expose inventory operations via REST | Not Started |
|
||||
| Unit test suite | Core API, database, partnum, file, CSV/ODS handler tests | Complete (137 tests) |
|
||||
| Date segment type | Implement `date` segment with strftime-style formatting | Complete (#79) |
|
||||
| Part number validation | Validate format against schema on creation | Complete (#80) |
|
||||
| Location CRUD API | Expose location hierarchy via REST | Not Started (#81) |
|
||||
| Inventory API | Expose inventory operations via REST | Not Started (#82) |
|
||||
|
||||
**Success metrics:**
|
||||
- All existing tests pass
|
||||
@@ -187,9 +187,9 @@ Enable team collaboration (feeds into Tier 1 and Tier 4).
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Check-out locking | Pessimistic locks with timeout | Not Started |
|
||||
| User/group management | Create, assign, manage users and groups | Not Started |
|
||||
| Folder permissions | Read/write/delete per folder hierarchy | Not Started |
|
||||
| Check-out locking | Pessimistic locks with timeout | Not Started (#87) |
|
||||
| User/group management | Create, assign, manage users and groups | Not Started (#88) |
|
||||
| Folder permissions | Read/write/delete per folder/project hierarchy | Not Started (#89) |
|
||||
|
||||
**Success metrics:**
|
||||
- 5+ concurrent users supported
|
||||
@@ -218,8 +218,8 @@ Improve findability and navigation (Tier 0 Web UI Shell).
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Advanced search UI | Web interface with filters and operators | Not Started |
|
||||
| Saved searches | User-defined query favorites | Not Started |
|
||||
| Advanced search UI | Web interface with filters and operators | Not Started (#90) |
|
||||
| Saved searches | User-defined query favorites | Not Started (#91) |
|
||||
|
||||
**Success metrics:**
|
||||
- Search returns results in <2 seconds
|
||||
@@ -367,11 +367,11 @@ For full SOLIDWORKS PDM comparison tables, see [GAP_ANALYSIS.md Appendix C](GAP_
|
||||
| Feature | Status | Notes |
|
||||
|---------|--------|-------|
|
||||
| Odoo ERP integration | Partial | Config and sync-log CRUD functional; push/pull sync operations are stubs |
|
||||
| Date segment type | Not started | Schema parser placeholder exists |
|
||||
| Part number validation | Not started | API accepts but doesn't validate format |
|
||||
| Location hierarchy CRUD | Schema only | Tables exist, no API endpoints |
|
||||
| Inventory tracking | Schema only | Tables exist, no API endpoints |
|
||||
| Unit tests | Partial | 11 Go test files across api, db, ods, partnum, schema packages |
|
||||
| Date segment type | Complete | strftime-style formatting via Go time layout (#79) |
|
||||
| Part number validation | Complete | Validates against schema on creation (#80) |
|
||||
| Location hierarchy CRUD | Schema only | Tables exist, no API endpoints (#81) |
|
||||
| Inventory tracking | Schema only | Tables exist, no API endpoints (#82) |
|
||||
| Unit tests | Complete | 137 tests across 20 files covering api, db, ods, partnum, schema packages |
|
||||
|
||||
---
|
||||
|
||||
@@ -400,18 +400,21 @@ For full SOLIDWORKS PDM comparison tables, see [GAP_ANALYSIS.md Appendix C](GAP_
|
||||
- [x] BOM ODS export
|
||||
- [x] ODS item export/import/template
|
||||
|
||||
### 1.4 Unit Test Suite
|
||||
- [ ] Database connection and transaction tests
|
||||
- [ ] Item CRUD operation tests
|
||||
- [ ] Revision creation and retrieval tests
|
||||
- [ ] Part number generation tests
|
||||
- [ ] File upload/download tests
|
||||
- [ ] CSV import/export tests
|
||||
- [ ] API endpoint tests
|
||||
### 1.4 Unit Test Suite -- COMPLETE
|
||||
- [x] Database connection and transaction tests
|
||||
- [x] Item CRUD operation tests (including edge cases: duplicate keys, pagination, search)
|
||||
- [x] Revision creation, retrieval, compare, rollback tests
|
||||
- [x] Part number generation tests (including date segments, validation)
|
||||
- [x] File upload/download handler tests
|
||||
- [x] CSV import/export tests (dry-run, commit, BOM export)
|
||||
- [x] ODS import/export tests (export, template, project sheet)
|
||||
- [x] API endpoint tests (revisions, schemas, audit, auth tokens)
|
||||
- [x] Item file CRUD tests
|
||||
- [x] BOM handler tests (get, flat, cost, add, delete)
|
||||
|
||||
### 1.5 Missing Segment Types
|
||||
- [ ] Implement date segment type
|
||||
- [ ] Add strftime-style format support
|
||||
### 1.5 Missing Segment Types -- COMPLETE
|
||||
- [x] Implement date segment type
|
||||
- [x] Add strftime-style format support
|
||||
|
||||
### 1.6 Location & Inventory APIs
|
||||
- [ ] `GET /api/locations` - List locations
|
||||
|
||||
364
docs/WORKERS.md
364
docs/WORKERS.md
@@ -1,364 +0,0 @@
|
||||
# Worker System Specification
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
The worker system provides async compute job execution for Silo. Jobs are defined as YAML files, managed by the Silo server, and executed by external runner processes. The system is general-purpose -- while DAG validation is the first use case, it supports any compute workload: geometry export, thumbnail rendering, FEA/CFD batch jobs, report generation, and data migration.
|
||||
|
||||
---
|
||||
|
||||
## 2. Architecture
|
||||
|
||||
```
|
||||
YAML Job Definitions (files on disk, version-controllable)
|
||||
|
|
||||
v
|
||||
Silo Server (parser, scheduler, state machine, REST API, SSE events)
|
||||
|
|
||||
v
|
||||
Runners (silorunner binary, polls via REST, executes Headless Create)
|
||||
```
|
||||
|
||||
**Three layers:**
|
||||
|
||||
1. **Job definitions** -- YAML files in a configurable directory (default `/etc/silo/jobdefs`). Each file defines a job type: what triggers it, what it operates on, what computation to perform, and what runner capabilities are required. These are the source of truth and can be version-controlled alongside other Silo config.
|
||||
|
||||
2. **Silo server** -- Parses YAML definitions on startup and upserts them into the `job_definitions` table. Creates job instances when triggers fire (revision created, BOM changed, manual). Manages job lifecycle, enforces timeouts, and broadcasts status via SSE.
|
||||
|
||||
3. **Runners** -- Separate `silorunner` processes that authenticate with Silo via API tokens, poll for available jobs, claim them atomically, execute the compute, and report results. A runner host must have Headless Create and silo-mod installed for geometry jobs.
|
||||
|
||||
---
|
||||
|
||||
## 3. Job Definition Format
|
||||
|
||||
Job definitions are YAML files with the following structure:
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: assembly-validate
|
||||
version: 1
|
||||
description: "Validate assembly by rebuilding its dependency subgraph"
|
||||
|
||||
trigger:
|
||||
type: revision_created # revision_created, bom_changed, manual, schedule
|
||||
filter:
|
||||
item_type: assembly # only trigger for assemblies
|
||||
|
||||
scope:
|
||||
type: assembly # item, assembly, project
|
||||
|
||||
compute:
|
||||
type: validate # validate, rebuild, diff, export, custom
|
||||
command: create-validate # runner-side command identifier
|
||||
args: # passed to runner as JSON
|
||||
rebuild_mode: incremental
|
||||
check_interference: true
|
||||
|
||||
runner:
|
||||
tags: [create] # required runner capabilities
|
||||
|
||||
timeout: 900 # seconds before job is marked failed (default 600)
|
||||
max_retries: 2 # retry count on failure (default 1)
|
||||
priority: 50 # lower = higher priority (default 100)
|
||||
```
|
||||
|
||||
### 3.1 Trigger Types
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `revision_created` | Fires when a new revision is created on an item matching the filter |
|
||||
| `bom_changed` | Fires when a BOM merge completes |
|
||||
| `manual` | Only triggered via `POST /api/jobs` |
|
||||
| `schedule` | Future: cron-like scheduling (not yet implemented) |
|
||||
|
||||
### 3.2 Trigger Filters
|
||||
|
||||
The `filter` map supports key-value matching against item properties:
|
||||
|
||||
| Key | Description |
|
||||
|-----|-------------|
|
||||
| `item_type` | Match item type: `part`, `assembly`, `drawing`, etc. |
|
||||
| `schema` | Match schema name |
|
||||
|
||||
All filter keys must match for the trigger to fire. An empty filter matches all items.
|
||||
|
||||
### 3.3 Scope Types
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `item` | Job operates on a single item |
|
||||
| `assembly` | Job operates on an assembly and its BOM tree |
|
||||
| `project` | Job operates on all items in a project |
|
||||
|
||||
### 3.4 Compute Commands
|
||||
|
||||
The `command` field identifies what the runner should execute. Built-in commands:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `create-validate` | Open file in Headless Create, rebuild features, report validation results |
|
||||
| `create-export` | Open file, export to specified format (STEP, IGES, 3MF) |
|
||||
| `create-dag-extract` | Open file, extract feature DAG, output as JSON |
|
||||
| `create-thumbnail` | Open file, render thumbnail image |
|
||||
|
||||
Custom commands can be added by extending silo-mod's `silo.runner` module.
|
||||
|
||||
---
|
||||
|
||||
## 4. Job Lifecycle
|
||||
|
||||
```
|
||||
pending → claimed → running → completed
|
||||
→ failed
|
||||
→ cancelled
|
||||
```
|
||||
|
||||
| State | Description |
|
||||
|-------|-------------|
|
||||
| `pending` | Job created, waiting for a runner to claim it |
|
||||
| `claimed` | Runner has claimed the job. `expires_at` is set. |
|
||||
| `running` | Runner has started execution (reported via progress update) |
|
||||
| `completed` | Runner reported success. `result` JSONB contains output. |
|
||||
| `failed` | Runner reported failure, timeout expired, or max retries exceeded |
|
||||
| `cancelled` | Admin cancelled the job before completion |
|
||||
|
||||
### 4.1 Claim Semantics
|
||||
|
||||
Runners claim jobs via `POST /api/runner/claim`. The server uses PostgreSQL's `SELECT FOR UPDATE SKIP LOCKED` to ensure exactly-once delivery:
|
||||
|
||||
```sql
|
||||
WITH claimable AS (
|
||||
SELECT id FROM jobs
|
||||
WHERE status = 'pending'
|
||||
AND runner_tags <@ $2::text[]
|
||||
ORDER BY priority ASC, created_at ASC
|
||||
LIMIT 1
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
UPDATE jobs SET
|
||||
status = 'claimed',
|
||||
runner_id = $1,
|
||||
claimed_at = now(),
|
||||
expires_at = now() + (timeout_seconds || ' seconds')::interval
|
||||
FROM claimable
|
||||
WHERE jobs.id = claimable.id
|
||||
RETURNING jobs.*;
|
||||
```
|
||||
|
||||
The `runner_tags <@ $2::text[]` condition ensures the runner has all tags required by the job. A runner with tags `["create", "linux", "gpu"]` can claim a job requiring `["create"]`, but not one requiring `["create", "windows"]`.
|
||||
|
||||
### 4.2 Timeout Enforcement
|
||||
|
||||
A background sweeper runs every 30 seconds (configurable via `jobs.job_timeout_check`) and marks expired jobs as failed:
|
||||
|
||||
```sql
|
||||
UPDATE jobs SET status = 'failed', error_message = 'job timed out'
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND expires_at < now();
|
||||
```
|
||||
|
||||
### 4.3 Retry
|
||||
|
||||
When a job fails and `retry_count < max_retries`, a new job is created with the same definition and scope, with `retry_count` incremented.
|
||||
|
||||
---
|
||||
|
||||
## 5. Runners
|
||||
|
||||
### 5.1 Registration
|
||||
|
||||
Runners are registered via `POST /api/runners` (admin only). The server generates a token (shown once) and stores the SHA-256 hash in the `runners` table. This follows the same pattern as API tokens in `internal/auth/token.go`.
|
||||
|
||||
### 5.2 Authentication
|
||||
|
||||
Runners authenticate via `Authorization: Bearer silo_runner_<token>`. A dedicated `RequireRunnerAuth` middleware validates the token against the `runners` table and injects a `RunnerIdentity` into the request context.
|
||||
|
||||
### 5.3 Heartbeat
|
||||
|
||||
Runners send `POST /api/runner/heartbeat` every 30 seconds. The server updates `last_heartbeat` and sets `status = 'online'`. A background sweeper marks runners as `offline` if their heartbeat is older than `runner_timeout` seconds (default 90).
|
||||
|
||||
### 5.4 Tags
|
||||
|
||||
Each runner declares capability tags (e.g., `["create", "linux", "gpu"]`). Jobs require specific tags via the `runner.tags` field in their YAML definition. A runner can only claim jobs whose required tags are a subset of the runner's tags.
|
||||
|
||||
### 5.5 Runner Config
|
||||
|
||||
The `silorunner` binary reads its config from a YAML file:
|
||||
|
||||
```yaml
|
||||
server_url: "https://silo.example.com"
|
||||
token: "silo_runner_abc123..."
|
||||
name: "worker-01"
|
||||
tags: ["create", "linux"]
|
||||
poll_interval: 5 # seconds between claim attempts
|
||||
create_path: "/usr/bin/create" # path to Headless Create binary (with silo-mod installed)
|
||||
```
|
||||
|
||||
Or via environment variables: `SILO_RUNNER_SERVER_URL`, `SILO_RUNNER_TOKEN`, etc.
|
||||
|
||||
### 5.6 Deployment
|
||||
|
||||
Runner prerequisites:
|
||||
- `silorunner` binary (built from `cmd/silorunner/`)
|
||||
- Headless Create (Kindred's fork of FreeCAD) with silo-mod workbench installed
|
||||
- Network access to Silo server API
|
||||
|
||||
Runners can be deployed as:
|
||||
- Bare metal processes alongside Create installations
|
||||
- Docker containers with Create pre-installed
|
||||
- Scaled horizontally by registering multiple runners with different names
|
||||
|
||||
---
|
||||
|
||||
## 6. Job Log
|
||||
|
||||
Each job has an append-only log stored in the `job_log` table. Runners append entries via `POST /api/runner/jobs/{jobID}/log`:
|
||||
|
||||
```json
|
||||
{
|
||||
"level": "info",
|
||||
"message": "Rebuilding Pad003...",
|
||||
"metadata": {"node_key": "Pad003", "progress_pct": 45}
|
||||
}
|
||||
```
|
||||
|
||||
Log levels: `debug`, `info`, `warn`, `error`.
|
||||
|
||||
---
|
||||
|
||||
## 7. SSE Events
|
||||
|
||||
All job lifecycle transitions are broadcast via Silo's SSE broker. Clients subscribe to `/api/events` and receive:
|
||||
|
||||
| Event Type | Payload | When |
|
||||
|------------|---------|------|
|
||||
| `job.created` | `{id, definition_name, item_id, status, priority}` | Job created |
|
||||
| `job.claimed` | `{id, runner_id, runner_name}` | Runner claims job |
|
||||
| `job.progress` | `{id, progress, progress_message}` | Runner reports progress (0-100) |
|
||||
| `job.completed` | `{id, result_summary, duration_seconds}` | Job completed successfully |
|
||||
| `job.failed` | `{id, error_message}` | Job failed |
|
||||
| `job.cancelled` | `{id, cancelled_by}` | Admin cancelled job |
|
||||
| `runner.online` | `{id, name, tags}` | Runner heartbeat (first after offline) |
|
||||
| `runner.offline` | `{id, name}` | Runner heartbeat timeout |
|
||||
|
||||
---
|
||||
|
||||
## 8. REST API
|
||||
|
||||
### 8.1 Job Endpoints (user-facing, require auth)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/jobs` | viewer | List jobs (filterable by status, item, definition) |
|
||||
| `GET` | `/api/jobs/{jobID}` | viewer | Get job details |
|
||||
| `GET` | `/api/jobs/{jobID}/logs` | viewer | Get job log entries |
|
||||
| `POST` | `/api/jobs` | editor | Manually trigger a job |
|
||||
| `POST` | `/api/jobs/{jobID}/cancel` | editor | Cancel a pending/running job |
|
||||
|
||||
### 8.2 Job Definition Endpoints
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/job-definitions` | viewer | List loaded definitions |
|
||||
| `GET` | `/api/job-definitions/{name}` | viewer | Get specific definition |
|
||||
| `POST` | `/api/job-definitions/reload` | admin | Re-read YAML from disk |
|
||||
|
||||
### 8.3 Runner Management Endpoints (admin)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/runners` | admin | List registered runners |
|
||||
| `POST` | `/api/runners` | admin | Register runner (returns token) |
|
||||
| `DELETE` | `/api/runners/{runnerID}` | admin | Delete runner |
|
||||
|
||||
### 8.4 Runner-Facing Endpoints (runner token auth)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `POST` | `/api/runner/heartbeat` | runner | Send heartbeat |
|
||||
| `POST` | `/api/runner/claim` | runner | Claim next available job |
|
||||
| `PUT` | `/api/runner/jobs/{jobID}/progress` | runner | Report progress |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/complete` | runner | Report completion with result |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/fail` | runner | Report failure |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/log` | runner | Append log entry |
|
||||
| `PUT` | `/api/runner/jobs/{jobID}/dag` | runner | Sync DAG results after compute |
|
||||
|
||||
---
|
||||
|
||||
## 9. Configuration
|
||||
|
||||
Add to `config.yaml`:
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
directory: /etc/silo/jobdefs # path to YAML job definitions
|
||||
runner_timeout: 90 # seconds before marking runner offline
|
||||
job_timeout_check: 30 # seconds between timeout sweeps
|
||||
default_priority: 100 # default job priority
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. Example Job Definitions
|
||||
|
||||
### Assembly Validation
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: assembly-validate
|
||||
version: 1
|
||||
description: "Validate assembly by rebuilding its dependency subgraph"
|
||||
trigger:
|
||||
type: revision_created
|
||||
filter:
|
||||
item_type: assembly
|
||||
scope:
|
||||
type: assembly
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
args:
|
||||
rebuild_mode: incremental
|
||||
check_interference: true
|
||||
runner:
|
||||
tags: [create]
|
||||
timeout: 900
|
||||
max_retries: 2
|
||||
priority: 50
|
||||
```
|
||||
|
||||
### STEP Export
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: part-export-step
|
||||
version: 1
|
||||
description: "Export a part to STEP format"
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: export
|
||||
command: create-export
|
||||
args:
|
||||
format: step
|
||||
output_key_template: "exports/{part_number}_rev{revision}.step"
|
||||
runner:
|
||||
tags: [create]
|
||||
timeout: 300
|
||||
max_retries: 1
|
||||
priority: 100
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. References
|
||||
|
||||
- [DAG.md](DAG.md) -- Dependency DAG specification
|
||||
- [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md) -- Multi-user editing specification
|
||||
- [ROADMAP.md](ROADMAP.md) -- Tier 0 Job Queue Infrastructure, Tier 1 Headless Create
|
||||
@@ -38,8 +38,6 @@ func newAuthTestServer(t *testing.T) *Server {
|
||||
nil, // authConfig
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -1220,9 +1219,6 @@ func (s *Server) HandleMergeBOM(w http.ResponseWriter, r *http.Request) {
|
||||
"unreferenced": len(diff.Removed),
|
||||
}))
|
||||
|
||||
// Trigger auto-jobs (e.g. assembly validation)
|
||||
go s.triggerJobs(context.Background(), "bom_changed", parent.ID, parent)
|
||||
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
|
||||
@@ -35,8 +35,6 @@ func newTestServer(t *testing.T) *Server {
|
||||
nil, // authConfig (nil = dev mode)
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -64,8 +64,6 @@ func newTestServerWithSchemas(t *testing.T) *Server {
|
||||
nil, // authConfig
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,271 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// dagSyncRequest is the payload for PUT /api/items/{partNumber}/dag.
|
||||
type dagSyncRequest struct {
|
||||
RevisionNumber int `json:"revision_number"`
|
||||
Nodes []dagSyncNode `json:"nodes"`
|
||||
Edges []dagSyncEdge `json:"edges"`
|
||||
}
|
||||
|
||||
type dagSyncNode struct {
|
||||
NodeKey string `json:"node_key"`
|
||||
NodeType string `json:"node_type"`
|
||||
PropertiesHash *string `json:"properties_hash,omitempty"`
|
||||
ValidationState string `json:"validation_state,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type dagSyncEdge struct {
|
||||
SourceKey string `json:"source_key"`
|
||||
TargetKey string `json:"target_key"`
|
||||
EdgeType string `json:"edge_type,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
// HandleGetDAG returns the feature DAG for an item's current revision.
|
||||
func (s *Server) HandleGetDAG(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
nodes, err := s.dag.GetNodes(ctx, item.ID, item.CurrentRevision)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG nodes")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get DAG")
|
||||
return
|
||||
}
|
||||
|
||||
edges, err := s.dag.GetEdges(ctx, item.ID, item.CurrentRevision)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG edges")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get DAG edges")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"item_id": item.ID,
|
||||
"part_number": item.PartNumber,
|
||||
"revision_number": item.CurrentRevision,
|
||||
"nodes": nodes,
|
||||
"edges": edges,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleGetForwardCone returns all downstream dependents of a node.
|
||||
func (s *Server) HandleGetForwardCone(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
nodeKey := chi.URLParam(r, "nodeKey")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
node, err := s.dag.GetNodeByKey(ctx, item.ID, item.CurrentRevision, nodeKey)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG node")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get node")
|
||||
return
|
||||
}
|
||||
if node == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Node not found")
|
||||
return
|
||||
}
|
||||
|
||||
cone, err := s.dag.GetForwardCone(ctx, node.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get forward cone")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get forward cone")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"root_node": node,
|
||||
"cone": cone,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleGetDirtySubgraph returns all non-clean nodes for an item.
|
||||
func (s *Server) HandleGetDirtySubgraph(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
nodes, err := s.dag.GetDirtySubgraph(ctx, item.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get dirty subgraph")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get dirty subgraph")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"item_id": item.ID,
|
||||
"nodes": nodes,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleSyncDAG accepts a full feature tree from a client or runner.
|
||||
func (s *Server) HandleSyncDAG(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
var req dagSyncRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.RevisionNumber == 0 {
|
||||
req.RevisionNumber = item.CurrentRevision
|
||||
}
|
||||
|
||||
// Convert request nodes to DB nodes
|
||||
nodes := make([]db.DAGNode, len(req.Nodes))
|
||||
for i, n := range req.Nodes {
|
||||
state := n.ValidationState
|
||||
if state == "" {
|
||||
state = "clean"
|
||||
}
|
||||
nodes[i] = db.DAGNode{
|
||||
NodeKey: n.NodeKey,
|
||||
NodeType: n.NodeType,
|
||||
PropertiesHash: n.PropertiesHash,
|
||||
ValidationState: state,
|
||||
Metadata: n.Metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Sync nodes first to get IDs
|
||||
if err := s.dag.SyncFeatureTree(ctx, item.ID, req.RevisionNumber, nodes, nil); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to sync DAG nodes")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG")
|
||||
return
|
||||
}
|
||||
|
||||
// Build key→ID map from synced nodes
|
||||
keyToID := make(map[string]string, len(nodes))
|
||||
for _, n := range nodes {
|
||||
keyToID[n.NodeKey] = n.ID
|
||||
}
|
||||
|
||||
// Convert request edges, resolving keys to IDs
|
||||
edges := make([]db.DAGEdge, len(req.Edges))
|
||||
for i, e := range req.Edges {
|
||||
sourceID, ok := keyToID[e.SourceKey]
|
||||
if !ok {
|
||||
writeError(w, http.StatusBadRequest, "invalid_edge",
|
||||
"Unknown source_key: "+e.SourceKey)
|
||||
return
|
||||
}
|
||||
targetID, ok := keyToID[e.TargetKey]
|
||||
if !ok {
|
||||
writeError(w, http.StatusBadRequest, "invalid_edge",
|
||||
"Unknown target_key: "+e.TargetKey)
|
||||
return
|
||||
}
|
||||
edgeType := e.EdgeType
|
||||
if edgeType == "" {
|
||||
edgeType = "depends_on"
|
||||
}
|
||||
edges[i] = db.DAGEdge{
|
||||
SourceNodeID: sourceID,
|
||||
TargetNodeID: targetID,
|
||||
EdgeType: edgeType,
|
||||
Metadata: e.Metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Sync edges (nodes already synced, so pass empty nodes to skip re-upsert)
|
||||
if len(edges) > 0 {
|
||||
// Delete old edges and insert new ones
|
||||
if err := s.dag.DeleteEdgesForItem(ctx, item.ID, req.RevisionNumber); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to delete old edges")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG edges")
|
||||
return
|
||||
}
|
||||
for i := range edges {
|
||||
if err := s.dag.CreateEdge(ctx, &edges[i]); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to create edge")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to create edge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Publish SSE event
|
||||
s.broker.Publish("dag.updated", mustMarshal(map[string]any{
|
||||
"item_id": item.ID,
|
||||
"part_number": item.PartNumber,
|
||||
"revision_number": req.RevisionNumber,
|
||||
"node_count": len(req.Nodes),
|
||||
"edge_count": len(req.Edges),
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"synced": true,
|
||||
"node_count": len(req.Nodes),
|
||||
"edge_count": len(req.Edges),
|
||||
})
|
||||
}
|
||||
|
||||
// HandleMarkDirty marks a node and all its downstream dependents as dirty.
|
||||
func (s *Server) HandleMarkDirty(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
nodeKey := chi.URLParam(r, "nodeKey")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
node, err := s.dag.GetNodeByKey(ctx, item.ID, item.CurrentRevision, nodeKey)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG node")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get node")
|
||||
return
|
||||
}
|
||||
if node == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Node not found")
|
||||
return
|
||||
}
|
||||
|
||||
affected, err := s.dag.MarkDirty(ctx, node.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to mark dirty")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to mark dirty")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"node_key": nodeKey,
|
||||
"nodes_affected": affected,
|
||||
})
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
func newDAGTestServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
pool := testutil.MustConnectTestPool(t)
|
||||
database := db.NewFromPool(pool)
|
||||
broker := NewBroker(zerolog.Nop())
|
||||
state := NewServerState(zerolog.Nop(), nil, broker)
|
||||
return NewServer(
|
||||
zerolog.Nop(),
|
||||
database,
|
||||
map[string]*schema.Schema{},
|
||||
"",
|
||||
nil, nil, nil, nil, nil,
|
||||
broker, state,
|
||||
nil, "",
|
||||
)
|
||||
}
|
||||
|
||||
func newDAGRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Route("/api/items/{partNumber}", func(r chi.Router) {
|
||||
r.Get("/dag", s.HandleGetDAG)
|
||||
r.Get("/dag/forward-cone/{nodeKey}", s.HandleGetForwardCone)
|
||||
r.Get("/dag/dirty", s.HandleGetDirtySubgraph)
|
||||
r.Put("/dag", s.HandleSyncDAG)
|
||||
r.Post("/dag/mark-dirty/{nodeKey}", s.HandleMarkDirty)
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
func TestHandleGetDAG_Empty(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
// Create an item
|
||||
item := &db.Item{PartNumber: "DAG-TEST-001", ItemType: "part", Description: "DAG test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/DAG-TEST-001/dag", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["part_number"] != "DAG-TEST-001" {
|
||||
t.Errorf("expected part_number DAG-TEST-001, got %v", resp["part_number"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSyncDAG(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
// Create an item with a revision
|
||||
item := &db.Item{PartNumber: "DAG-SYNC-001", ItemType: "part", Description: "sync test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync a feature tree
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "Sketch001", "node_type": "sketch"},
|
||||
{"node_key": "Pad001", "node_type": "pad"},
|
||||
{"node_key": "Fillet001", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "Sketch001", "target_key": "Pad001", "edge_type": "depends_on"},
|
||||
{"source_key": "Pad001", "target_key": "Fillet001", "edge_type": "depends_on"}
|
||||
]
|
||||
}`
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-SYNC-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["node_count"] != float64(3) {
|
||||
t.Errorf("expected 3 nodes, got %v", resp["node_count"])
|
||||
}
|
||||
if resp["edge_count"] != float64(2) {
|
||||
t.Errorf("expected 2 edges, got %v", resp["edge_count"])
|
||||
}
|
||||
|
||||
// Verify we can read the DAG back
|
||||
req2 := httptest.NewRequest("GET", "/api/items/DAG-SYNC-001/dag", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("GET dag: expected 200, got %d", w2.Code)
|
||||
}
|
||||
var dagResp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &dagResp)
|
||||
nodes, ok := dagResp["nodes"].([]any)
|
||||
if !ok || len(nodes) != 3 {
|
||||
t.Errorf("expected 3 nodes in GET, got %v", dagResp["nodes"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleForwardCone(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
item := &db.Item{PartNumber: "DAG-CONE-001", ItemType: "part", Description: "cone test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync a linear chain: A -> B -> C
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "A", "node_type": "sketch"},
|
||||
{"node_key": "B", "node_type": "pad"},
|
||||
{"node_key": "C", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "A", "target_key": "B"},
|
||||
{"source_key": "B", "target_key": "C"}
|
||||
]
|
||||
}`
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-CONE-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("sync: %d %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Forward cone from A should include B and C
|
||||
req2 := httptest.NewRequest("GET", "/api/items/DAG-CONE-001/dag/forward-cone/A", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("forward-cone: %d %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &resp)
|
||||
cone, ok := resp["cone"].([]any)
|
||||
if !ok || len(cone) != 2 {
|
||||
t.Errorf("expected 2 nodes in forward cone, got %v", resp["cone"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleMarkDirty(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
item := &db.Item{PartNumber: "DAG-DIRTY-001", ItemType: "part", Description: "dirty test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync: A -> B -> C
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "X", "node_type": "sketch"},
|
||||
{"node_key": "Y", "node_type": "pad"},
|
||||
{"node_key": "Z", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "X", "target_key": "Y"},
|
||||
{"source_key": "Y", "target_key": "Z"}
|
||||
]
|
||||
}`
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-DIRTY-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("sync: %d %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Mark X dirty — should propagate to Y and Z
|
||||
req2 := httptest.NewRequest("POST", "/api/items/DAG-DIRTY-001/dag/mark-dirty/X", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("mark-dirty: %d %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &resp)
|
||||
affected := resp["nodes_affected"].(float64)
|
||||
if affected != 3 {
|
||||
t.Errorf("expected 3 nodes affected, got %v", affected)
|
||||
}
|
||||
|
||||
// Verify dirty subgraph
|
||||
req3 := httptest.NewRequest("GET", "/api/items/DAG-DIRTY-001/dag/dirty", nil)
|
||||
w3 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w3, req3)
|
||||
|
||||
if w3.Code != http.StatusOK {
|
||||
t.Fatalf("dirty: %d %s", w3.Code, w3.Body.String())
|
||||
}
|
||||
var dirtyResp map[string]any
|
||||
json.Unmarshal(w3.Body.Bytes(), &dirtyResp)
|
||||
dirtyNodes, ok := dirtyResp["nodes"].([]any)
|
||||
if !ok || len(dirtyNodes) != 3 {
|
||||
t.Errorf("expected 3 dirty nodes, got %v", dirtyResp["nodes"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleGetDAG_NotFound(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/NONEXISTENT-999/dag", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("expected 404, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
"github.com/kindredsystems/silo/internal/partnum"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/storage"
|
||||
@@ -44,10 +43,6 @@ type Server struct {
|
||||
itemFiles *db.ItemFileRepository
|
||||
broker *Broker
|
||||
serverState *ServerState
|
||||
dag *db.DAGRepository
|
||||
jobs *db.JobRepository
|
||||
jobDefs map[string]*jobdef.Definition
|
||||
jobDefsDir string
|
||||
}
|
||||
|
||||
// NewServer creates a new API server.
|
||||
@@ -63,15 +58,11 @@ func NewServer(
|
||||
authCfg *config.AuthConfig,
|
||||
broker *Broker,
|
||||
state *ServerState,
|
||||
jobDefs map[string]*jobdef.Definition,
|
||||
jobDefsDir string,
|
||||
) *Server {
|
||||
items := db.NewItemRepository(database)
|
||||
projects := db.NewProjectRepository(database)
|
||||
relationships := db.NewRelationshipRepository(database)
|
||||
itemFiles := db.NewItemFileRepository(database)
|
||||
dag := db.NewDAGRepository(database)
|
||||
jobs := db.NewJobRepository(database)
|
||||
seqStore := &dbSequenceStore{db: database, schemas: schemas}
|
||||
partgen := partnum.NewGenerator(schemas, seqStore)
|
||||
|
||||
@@ -92,10 +83,6 @@ func NewServer(
|
||||
itemFiles: itemFiles,
|
||||
broker: broker,
|
||||
serverState: state,
|
||||
dag: dag,
|
||||
jobs: jobs,
|
||||
jobDefs: jobDefs,
|
||||
jobDefsDir: jobDefsDir,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1489,9 +1476,6 @@ func (s *Server) HandleCreateRevision(w http.ResponseWriter, r *http.Request) {
|
||||
"part_number": partNumber,
|
||||
"revision_number": rev.RevisionNumber,
|
||||
}))
|
||||
|
||||
// Trigger auto-jobs (e.g. validation, export)
|
||||
go s.triggerJobs(context.Background(), "revision_created", item.ID, item)
|
||||
}
|
||||
|
||||
// HandleUploadFile uploads a file and creates a new revision.
|
||||
|
||||
@@ -1,378 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// HandleListJobs returns jobs filtered by status and/or item.
|
||||
func (s *Server) HandleListJobs(w http.ResponseWriter, r *http.Request) {
|
||||
status := r.URL.Query().Get("status")
|
||||
itemID := r.URL.Query().Get("item_id")
|
||||
|
||||
limit := 50
|
||||
if v := r.URL.Query().Get("limit"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n > 0 && n <= 200 {
|
||||
limit = n
|
||||
}
|
||||
}
|
||||
offset := 0
|
||||
if v := r.URL.Query().Get("offset"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n >= 0 {
|
||||
offset = n
|
||||
}
|
||||
}
|
||||
|
||||
jobs, err := s.jobs.ListJobs(r.Context(), status, itemID, limit, offset)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list jobs")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list jobs")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, jobs)
|
||||
}
|
||||
|
||||
// HandleGetJob returns a single job by ID.
|
||||
func (s *Server) HandleGetJob(w http.ResponseWriter, r *http.Request) {
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
|
||||
job, err := s.jobs.GetJob(r.Context(), jobID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get job")
|
||||
return
|
||||
}
|
||||
if job == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job not found")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, job)
|
||||
}
|
||||
|
||||
// HandleGetJobLogs returns log entries for a job.
|
||||
func (s *Server) HandleGetJobLogs(w http.ResponseWriter, r *http.Request) {
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
|
||||
logs, err := s.jobs.GetJobLogs(r.Context(), jobID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job logs")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get job logs")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, logs)
|
||||
}
|
||||
|
||||
// HandleCreateJob manually triggers a job.
|
||||
func (s *Server) HandleCreateJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
user := auth.UserFromContext(ctx)
|
||||
|
||||
var req struct {
|
||||
DefinitionName string `json:"definition_name"`
|
||||
ItemID *string `json:"item_id,omitempty"`
|
||||
ProjectID *string `json:"project_id,omitempty"`
|
||||
ScopeMetadata map[string]any `json:"scope_metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.DefinitionName == "" {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "definition_name is required")
|
||||
return
|
||||
}
|
||||
|
||||
// Look up definition
|
||||
def, err := s.jobs.GetDefinition(ctx, req.DefinitionName)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to look up job definition")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to look up definition")
|
||||
return
|
||||
}
|
||||
if def == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job definition not found: "+req.DefinitionName)
|
||||
return
|
||||
}
|
||||
|
||||
var createdBy *string
|
||||
if user != nil {
|
||||
createdBy = &user.Username
|
||||
}
|
||||
|
||||
job := &db.Job{
|
||||
JobDefinitionID: &def.ID,
|
||||
DefinitionName: def.Name,
|
||||
Priority: def.Priority,
|
||||
ItemID: req.ItemID,
|
||||
ProjectID: req.ProjectID,
|
||||
ScopeMetadata: req.ScopeMetadata,
|
||||
RunnerTags: def.RunnerTags,
|
||||
TimeoutSeconds: def.TimeoutSeconds,
|
||||
MaxRetries: def.MaxRetries,
|
||||
CreatedBy: createdBy,
|
||||
}
|
||||
|
||||
if err := s.jobs.CreateJob(ctx, job); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to create job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to create job")
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.created", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"definition_name": job.DefinitionName,
|
||||
"item_id": job.ItemID,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusCreated, job)
|
||||
}
|
||||
|
||||
// HandleCancelJob cancels a pending or active job.
|
||||
func (s *Server) HandleCancelJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
user := auth.UserFromContext(ctx)
|
||||
|
||||
cancelledBy := "system"
|
||||
if user != nil {
|
||||
cancelledBy = user.Username
|
||||
}
|
||||
|
||||
if err := s.jobs.CancelJob(ctx, jobID, cancelledBy); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "cancel_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.cancelled", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"cancelled_by": cancelledBy,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "cancelled"})
|
||||
}
|
||||
|
||||
// HandleListJobDefinitions returns all loaded job definitions.
|
||||
func (s *Server) HandleListJobDefinitions(w http.ResponseWriter, r *http.Request) {
|
||||
defs, err := s.jobs.ListDefinitions(r.Context())
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list job definitions")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list definitions")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, defs)
|
||||
}
|
||||
|
||||
// HandleGetJobDefinition returns a single job definition by name.
|
||||
func (s *Server) HandleGetJobDefinition(w http.ResponseWriter, r *http.Request) {
|
||||
name := chi.URLParam(r, "name")
|
||||
|
||||
def, err := s.jobs.GetDefinition(r.Context(), name)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job definition")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get definition")
|
||||
return
|
||||
}
|
||||
if def == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job definition not found")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, def)
|
||||
}
|
||||
|
||||
// HandleReloadJobDefinitions re-reads YAML files from disk and upserts them.
|
||||
func (s *Server) HandleReloadJobDefinitions(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
if s.jobDefsDir == "" {
|
||||
writeError(w, http.StatusBadRequest, "no_directory", "Job definitions directory not configured")
|
||||
return
|
||||
}
|
||||
|
||||
defs, err := loadAndUpsertJobDefs(ctx, s.jobDefsDir, s.jobs)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to reload job definitions")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to reload definitions")
|
||||
return
|
||||
}
|
||||
|
||||
// Update in-memory map
|
||||
s.jobDefs = defs
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"reloaded": len(defs),
|
||||
})
|
||||
}
|
||||
|
||||
// HandleListRunners returns all registered runners (admin).
|
||||
func (s *Server) HandleListRunners(w http.ResponseWriter, r *http.Request) {
|
||||
runners, err := s.jobs.ListRunners(r.Context())
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list runners")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list runners")
|
||||
return
|
||||
}
|
||||
|
||||
// Redact token hashes from response
|
||||
type runnerResponse struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
TokenPrefix string `json:"token_prefix"`
|
||||
Tags []string `json:"tags"`
|
||||
Status string `json:"status"`
|
||||
LastHeartbeat *string `json:"last_heartbeat,omitempty"`
|
||||
LastJobID *string `json:"last_job_id,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
resp := make([]runnerResponse, len(runners))
|
||||
for i, runner := range runners {
|
||||
var hb *string
|
||||
if runner.LastHeartbeat != nil {
|
||||
s := runner.LastHeartbeat.Format("2006-01-02T15:04:05Z07:00")
|
||||
hb = &s
|
||||
}
|
||||
resp[i] = runnerResponse{
|
||||
ID: runner.ID,
|
||||
Name: runner.Name,
|
||||
TokenPrefix: runner.TokenPrefix,
|
||||
Tags: runner.Tags,
|
||||
Status: runner.Status,
|
||||
LastHeartbeat: hb,
|
||||
LastJobID: runner.LastJobID,
|
||||
Metadata: runner.Metadata,
|
||||
CreatedAt: runner.CreatedAt.Format("2006-01-02T15:04:05Z07:00"),
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// HandleRegisterRunner creates a new runner and returns the token (admin).
|
||||
func (s *Server) HandleRegisterRunner(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
var req struct {
|
||||
Name string `json:"name"`
|
||||
Tags []string `json:"tags"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
if req.Name == "" {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "name is required")
|
||||
return
|
||||
}
|
||||
if len(req.Tags) == 0 {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "tags is required (at least one)")
|
||||
return
|
||||
}
|
||||
|
||||
rawToken, tokenHash, tokenPrefix := generateRunnerToken()
|
||||
|
||||
runner := &db.Runner{
|
||||
Name: req.Name,
|
||||
TokenHash: tokenHash,
|
||||
TokenPrefix: tokenPrefix,
|
||||
Tags: req.Tags,
|
||||
Metadata: req.Metadata,
|
||||
}
|
||||
|
||||
if err := s.jobs.RegisterRunner(ctx, runner); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to register runner")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to register runner")
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("runner.online", mustMarshal(map[string]any{
|
||||
"runner_id": runner.ID,
|
||||
"name": runner.Name,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusCreated, map[string]any{
|
||||
"id": runner.ID,
|
||||
"name": runner.Name,
|
||||
"token": rawToken,
|
||||
"tags": runner.Tags,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleDeleteRunner removes a runner (admin).
|
||||
func (s *Server) HandleDeleteRunner(w http.ResponseWriter, r *http.Request) {
|
||||
runnerID := chi.URLParam(r, "runnerID")
|
||||
|
||||
if err := s.jobs.DeleteRunner(r.Context(), runnerID); err != nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// triggerJobs creates jobs for all enabled definitions matching the trigger type.
|
||||
// It applies trigger filters (e.g. item_type) before creating each job.
|
||||
func (s *Server) triggerJobs(ctx context.Context, triggerType string, itemID string, item *db.Item) {
|
||||
defs, err := s.jobs.GetDefinitionsByTrigger(ctx, triggerType)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Str("trigger", triggerType).Msg("failed to get job definitions for trigger")
|
||||
return
|
||||
}
|
||||
|
||||
for _, def := range defs {
|
||||
// Apply trigger filter (e.g. item_type == "assembly")
|
||||
if def.Definition != nil {
|
||||
if triggerCfg, ok := def.Definition["trigger"].(map[string]any); ok {
|
||||
if filterCfg, ok := triggerCfg["filter"].(map[string]any); ok {
|
||||
if reqType, ok := filterCfg["item_type"].(string); ok && item != nil {
|
||||
if item.ItemType != reqType {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
job := &db.Job{
|
||||
JobDefinitionID: &def.ID,
|
||||
DefinitionName: def.Name,
|
||||
Priority: def.Priority,
|
||||
ItemID: &itemID,
|
||||
RunnerTags: def.RunnerTags,
|
||||
TimeoutSeconds: def.TimeoutSeconds,
|
||||
MaxRetries: def.MaxRetries,
|
||||
}
|
||||
|
||||
if err := s.jobs.CreateJob(ctx, job); err != nil {
|
||||
s.logger.Error().Err(err).Str("definition", def.Name).Msg("failed to create triggered job")
|
||||
continue
|
||||
}
|
||||
|
||||
s.broker.Publish("job.created", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"definition_name": def.Name,
|
||||
"trigger": triggerType,
|
||||
"item_id": itemID,
|
||||
}))
|
||||
|
||||
s.logger.Info().
|
||||
Str("job_id", job.ID).
|
||||
Str("definition", def.Name).
|
||||
Str("trigger", triggerType).
|
||||
Str("item_id", itemID).
|
||||
Msg("triggered job")
|
||||
}
|
||||
}
|
||||
@@ -1,338 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
func newJobTestServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
pool := testutil.MustConnectTestPool(t)
|
||||
database := db.NewFromPool(pool)
|
||||
broker := NewBroker(zerolog.Nop())
|
||||
state := NewServerState(zerolog.Nop(), nil, broker)
|
||||
return NewServer(
|
||||
zerolog.Nop(),
|
||||
database,
|
||||
map[string]*schema.Schema{},
|
||||
"",
|
||||
nil, nil, nil, nil, nil,
|
||||
broker, state,
|
||||
nil, "",
|
||||
)
|
||||
}
|
||||
|
||||
func newJobRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Route("/api/jobs", func(r chi.Router) {
|
||||
r.Get("/", s.HandleListJobs)
|
||||
r.Get("/{jobID}", s.HandleGetJob)
|
||||
r.Get("/{jobID}/logs", s.HandleGetJobLogs)
|
||||
r.Post("/", s.HandleCreateJob)
|
||||
r.Post("/{jobID}/cancel", s.HandleCancelJob)
|
||||
})
|
||||
r.Route("/api/job-definitions", func(r chi.Router) {
|
||||
r.Get("/", s.HandleListJobDefinitions)
|
||||
r.Get("/{name}", s.HandleGetJobDefinition)
|
||||
})
|
||||
r.Route("/api/runners", func(r chi.Router) {
|
||||
r.Get("/", s.HandleListRunners)
|
||||
r.Post("/", s.HandleRegisterRunner)
|
||||
r.Delete("/{runnerID}", s.HandleDeleteRunner)
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
func seedJobDefinition(t *testing.T, s *Server) *db.JobDefinitionRecord {
|
||||
t.Helper()
|
||||
rec := &db.JobDefinitionRecord{
|
||||
Name: "test-validate",
|
||||
Version: 1,
|
||||
TriggerType: "manual",
|
||||
ScopeType: "item",
|
||||
ComputeType: "validate",
|
||||
RunnerTags: []string{"create"},
|
||||
TimeoutSeconds: 300,
|
||||
MaxRetries: 1,
|
||||
Priority: 100,
|
||||
Definition: map[string]any{"compute": map[string]any{"command": "create-validate"}},
|
||||
Enabled: true,
|
||||
}
|
||||
if err := s.jobs.UpsertDefinition(context.Background(), rec); err != nil {
|
||||
t.Fatalf("seeding job definition: %v", err)
|
||||
}
|
||||
return rec
|
||||
}
|
||||
|
||||
func TestHandleListJobDefinitions(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/job-definitions", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var defs []map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &defs)
|
||||
if len(defs) == 0 {
|
||||
t.Error("expected at least one definition")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleGetJobDefinition(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/job-definitions/test-validate", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var def map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &def)
|
||||
if def["name"] != "test-validate" {
|
||||
t.Errorf("expected name test-validate, got %v", def["name"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCreateAndGetJob(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf("create: expected 201, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var job map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &job)
|
||||
jobID := job["ID"].(string)
|
||||
if jobID == "" {
|
||||
t.Fatal("job ID is empty")
|
||||
}
|
||||
|
||||
// Get the job
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs/"+jobID, nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("get: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCancelJob(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
var job map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &job)
|
||||
jobID := job["ID"].(string)
|
||||
|
||||
// Cancel the job
|
||||
req2 := httptest.NewRequest("POST", "/api/jobs/"+jobID+"/cancel", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("cancel: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListJobs(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
// List jobs
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("list: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var jobs []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &jobs)
|
||||
if len(jobs) == 0 {
|
||||
t.Error("expected at least one job")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListJobs_FilterByStatus(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
// Filter by pending
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs?status=pending", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", w2.Code)
|
||||
}
|
||||
|
||||
var jobs []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &jobs)
|
||||
if len(jobs) == 0 {
|
||||
t.Error("expected pending jobs")
|
||||
}
|
||||
|
||||
// Filter by completed (should be empty)
|
||||
req3 := httptest.NewRequest("GET", "/api/jobs?status=completed", nil)
|
||||
w3 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w3, req3)
|
||||
|
||||
var completedJobs []map[string]any
|
||||
json.Unmarshal(w3.Body.Bytes(), &completedJobs)
|
||||
if len(completedJobs) != 0 {
|
||||
t.Errorf("expected no completed jobs, got %d", len(completedJobs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegisterAndListRunners(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
// Register a runner
|
||||
body := `{"name": "test-runner-1", "tags": ["create", "linux"]}`
|
||||
req := httptest.NewRequest("POST", "/api/runners", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf("register: expected 201, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["token"] == nil || resp["token"] == "" {
|
||||
t.Error("expected a token in response")
|
||||
}
|
||||
if !strings.HasPrefix(resp["token"].(string), "silo_runner_") {
|
||||
t.Errorf("expected token to start with silo_runner_, got %s", resp["token"])
|
||||
}
|
||||
|
||||
// List runners
|
||||
req2 := httptest.NewRequest("GET", "/api/runners", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("list: expected 200, got %d", w2.Code)
|
||||
}
|
||||
|
||||
var runners []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &runners)
|
||||
if len(runners) == 0 {
|
||||
t.Error("expected at least one runner")
|
||||
}
|
||||
// Token hash should not be exposed
|
||||
for _, runner := range runners {
|
||||
if runner["token_hash"] != nil {
|
||||
t.Error("token_hash should not be in response")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleDeleteRunner(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
// Register a runner
|
||||
body := `{"name": "test-runner-delete", "tags": ["create"]}`
|
||||
req := httptest.NewRequest("POST", "/api/runners", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
runnerID := resp["id"].(string)
|
||||
|
||||
// Delete the runner
|
||||
req2 := httptest.NewRequest("DELETE", "/api/runners/"+runnerID, nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusNoContent {
|
||||
t.Fatalf("delete: expected 204, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateRunnerToken(t *testing.T) {
|
||||
raw, hash, prefix := generateRunnerToken()
|
||||
|
||||
if !strings.HasPrefix(raw, "silo_runner_") {
|
||||
t.Errorf("raw token should start with silo_runner_, got %s", raw[:20])
|
||||
}
|
||||
if len(hash) != 64 {
|
||||
t.Errorf("hash should be 64 hex chars, got %d", len(hash))
|
||||
}
|
||||
if len(prefix) != 20 {
|
||||
t.Errorf("prefix should be 20 chars, got %d: %s", len(prefix), prefix)
|
||||
}
|
||||
|
||||
// Two tokens should be different
|
||||
raw2, _, _ := generateRunnerToken()
|
||||
if raw == raw2 {
|
||||
t.Error("two generated tokens should be different")
|
||||
}
|
||||
}
|
||||
@@ -2,8 +2,6 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -150,39 +148,6 @@ func (s *Server) RequireWritable(next http.Handler) http.Handler {
|
||||
})
|
||||
}
|
||||
|
||||
// RequireRunnerAuth extracts and validates a runner token from the
|
||||
// Authorization header. On success, injects RunnerIdentity into context
|
||||
// and updates the runner's heartbeat.
|
||||
func (s *Server) RequireRunnerAuth(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
token := extractBearerToken(r)
|
||||
if token == "" || !strings.HasPrefix(token, "silo_runner_") {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner token required")
|
||||
return
|
||||
}
|
||||
|
||||
hash := sha256.Sum256([]byte(token))
|
||||
tokenHash := hex.EncodeToString(hash[:])
|
||||
|
||||
runner, err := s.jobs.GetRunnerByToken(r.Context(), tokenHash)
|
||||
if err != nil || runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Invalid runner token")
|
||||
return
|
||||
}
|
||||
|
||||
// Update heartbeat on every authenticated request
|
||||
_ = s.jobs.Heartbeat(r.Context(), runner.ID)
|
||||
|
||||
identity := &auth.RunnerIdentity{
|
||||
ID: runner.ID,
|
||||
Name: runner.Name,
|
||||
Tags: runner.Tags,
|
||||
}
|
||||
ctx := auth.ContextWithRunner(r.Context(), identity)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func extractBearerToken(r *http.Request) string {
|
||||
h := r.Header.Get("Authorization")
|
||||
if strings.HasPrefix(h, "Bearer ") {
|
||||
|
||||
@@ -150,11 +150,6 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Get("/bom/export.csv", server.HandleExportBOMCSV)
|
||||
r.Get("/bom/export.ods", server.HandleExportBOMODS)
|
||||
|
||||
// DAG (read: viewer, write: editor)
|
||||
r.Get("/dag", server.HandleGetDAG)
|
||||
r.Get("/dag/forward-cone/{nodeKey}", server.HandleGetForwardCone)
|
||||
r.Get("/dag/dirty", server.HandleGetDirtySubgraph)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
@@ -174,8 +169,6 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Post("/bom/merge", server.HandleMergeBOM)
|
||||
r.Put("/bom/{childPartNumber}", server.HandleUpdateBOMEntry)
|
||||
r.Delete("/bom/{childPartNumber}", server.HandleDeleteBOMEntry)
|
||||
r.Put("/dag", server.HandleSyncDAG)
|
||||
r.Post("/dag/mark-dirty/{nodeKey}", server.HandleMarkDirty)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -208,39 +201,6 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Post("/sheets/diff", server.HandleSheetDiff)
|
||||
})
|
||||
|
||||
// Jobs (read: viewer, write: editor)
|
||||
r.Route("/jobs", func(r chi.Router) {
|
||||
r.Get("/", server.HandleListJobs)
|
||||
r.Get("/{jobID}", server.HandleGetJob)
|
||||
r.Get("/{jobID}/logs", server.HandleGetJobLogs)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
r.Post("/", server.HandleCreateJob)
|
||||
r.Post("/{jobID}/cancel", server.HandleCancelJob)
|
||||
})
|
||||
})
|
||||
|
||||
// Job definitions (read: viewer, reload: admin)
|
||||
r.Route("/job-definitions", func(r chi.Router) {
|
||||
r.Get("/", server.HandleListJobDefinitions)
|
||||
r.Get("/{name}", server.HandleGetJobDefinition)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireRole(auth.RoleAdmin))
|
||||
r.Post("/reload", server.HandleReloadJobDefinitions)
|
||||
})
|
||||
})
|
||||
|
||||
// Runners (admin)
|
||||
r.Route("/runners", func(r chi.Router) {
|
||||
r.Use(server.RequireRole(auth.RoleAdmin))
|
||||
r.Get("/", server.HandleListRunners)
|
||||
r.Post("/", server.HandleRegisterRunner)
|
||||
r.Delete("/{runnerID}", server.HandleDeleteRunner)
|
||||
})
|
||||
|
||||
// Part number generation (editor)
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
@@ -249,19 +209,6 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
})
|
||||
})
|
||||
|
||||
// Runner-facing API (runner token auth, not user auth)
|
||||
r.Route("/api/runner", func(r chi.Router) {
|
||||
r.Use(server.RequireRunnerAuth)
|
||||
r.Post("/heartbeat", server.HandleRunnerHeartbeat)
|
||||
r.Post("/claim", server.HandleRunnerClaim)
|
||||
r.Post("/jobs/{jobID}/start", server.HandleRunnerStartJob)
|
||||
r.Put("/jobs/{jobID}/progress", server.HandleRunnerUpdateProgress)
|
||||
r.Post("/jobs/{jobID}/complete", server.HandleRunnerCompleteJob)
|
||||
r.Post("/jobs/{jobID}/fail", server.HandleRunnerFailJob)
|
||||
r.Post("/jobs/{jobID}/log", server.HandleRunnerAppendLog)
|
||||
r.Put("/jobs/{jobID}/dag", server.HandleRunnerSyncDAG)
|
||||
})
|
||||
|
||||
// React SPA — serve from web/dist at root, fallback to index.html
|
||||
if info, err := os.Stat("web/dist"); err == nil && info.IsDir() {
|
||||
spa := http.FileServerFS(os.DirFS("web/dist"))
|
||||
|
||||
@@ -1,385 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
)
|
||||
|
||||
// HandleRunnerHeartbeat updates the runner's heartbeat timestamp.
|
||||
func (s *Server) HandleRunnerHeartbeat(w http.ResponseWriter, r *http.Request) {
|
||||
runner := auth.RunnerFromContext(r.Context())
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
// Heartbeat already updated by RequireRunnerAuth middleware
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
// HandleRunnerClaim claims the next available job matching the runner's tags.
|
||||
func (s *Server) HandleRunnerClaim(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
job, err := s.jobs.ClaimJob(ctx, runner.ID, runner.Tags)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to claim job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to claim job")
|
||||
return
|
||||
}
|
||||
if job == nil {
|
||||
writeJSON(w, http.StatusNoContent, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Look up the full definition to send to the runner
|
||||
var defPayload map[string]any
|
||||
if job.JobDefinitionID != nil {
|
||||
rec, err := s.jobs.GetDefinitionByID(ctx, *job.JobDefinitionID)
|
||||
if err == nil && rec != nil {
|
||||
defPayload = rec.Definition
|
||||
}
|
||||
}
|
||||
|
||||
s.broker.Publish("job.claimed", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"runner_id": runner.ID,
|
||||
"runner": runner.Name,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"job": job,
|
||||
"definition": defPayload,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleRunnerStartJob transitions a claimed job to running.
|
||||
func (s *Server) HandleRunnerStartJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
if err := s.jobs.StartJob(ctx, jobID, runner.ID); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "start_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "running"})
|
||||
}
|
||||
|
||||
// HandleRunnerUpdateProgress updates a running job's progress.
|
||||
func (s *Server) HandleRunnerUpdateProgress(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Progress int `json:"progress"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.UpdateProgress(ctx, jobID, runner.ID, req.Progress, req.Message); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "update_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.progress", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"progress": req.Progress,
|
||||
"message": req.Message,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
// HandleRunnerCompleteJob marks a job as completed.
|
||||
func (s *Server) HandleRunnerCompleteJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Result map[string]any `json:"result,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.CompleteJob(ctx, jobID, runner.ID, req.Result); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "complete_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.completed", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"runner_id": runner.ID,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "completed"})
|
||||
}
|
||||
|
||||
// HandleRunnerFailJob marks a job as failed.
|
||||
func (s *Server) HandleRunnerFailJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.FailJob(ctx, jobID, runner.ID, req.Error); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "fail_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.failed", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"runner_id": runner.ID,
|
||||
"error": req.Error,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "failed"})
|
||||
}
|
||||
|
||||
// HandleRunnerAppendLog appends a log entry to a job.
|
||||
func (s *Server) HandleRunnerAppendLog(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.Level == "" {
|
||||
req.Level = "info"
|
||||
}
|
||||
|
||||
entry := &db.JobLogEntry{
|
||||
JobID: jobID,
|
||||
Level: req.Level,
|
||||
Message: req.Message,
|
||||
Metadata: req.Metadata,
|
||||
}
|
||||
if err := s.jobs.AppendLog(ctx, entry); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to append job log")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to append log")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusCreated, entry)
|
||||
}
|
||||
|
||||
// HandleRunnerSyncDAG allows a runner to push DAG results for a job's item.
//
// Flow: resolve the job from the URL (404 if missing, 400 if it has no item),
// decode a dagSyncRequest body, default the revision to the item's current
// revision when the caller sends 0, upsert all nodes via SyncFeatureTree,
// then replace the item's edges with the submitted ones, and finally publish
// a dag.updated event and return node/edge counts.
func (s *Server) HandleRunnerSyncDAG(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	runner := auth.RunnerFromContext(ctx)
	if runner == nil {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
		return
	}

	jobID := chi.URLParam(r, "jobID")

	// Get the job to find the item. A lookup error and a missing job are
	// collapsed into the same 404 here.
	job, err := s.jobs.GetJob(ctx, jobID)
	if err != nil || job == nil {
		writeError(w, http.StatusNotFound, "not_found", "Job not found")
		return
	}
	if job.ItemID == nil {
		writeError(w, http.StatusBadRequest, "no_item", "Job has no associated item")
		return
	}

	// Delegate to the DAG sync handler logic — reuses the same request
	// shape as the user-facing /dag sync endpoint.
	var req dagSyncRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
		return
	}

	if req.RevisionNumber == 0 {
		// Look up current revision: revision 0 is treated as "unspecified"
		// and replaced with the item's current revision.
		item, err := s.items.GetByID(ctx, *job.ItemID)
		if err != nil || item == nil {
			writeError(w, http.StatusNotFound, "not_found", "Item not found")
			return
		}
		req.RevisionNumber = item.CurrentRevision
	}

	// Convert and sync nodes. A node without an explicit validation state
	// defaults to "clean".
	nodes := make([]db.DAGNode, len(req.Nodes))
	for i, n := range req.Nodes {
		state := n.ValidationState
		if state == "" {
			state = "clean"
		}
		nodes[i] = db.DAGNode{
			NodeKey:         n.NodeKey,
			NodeType:        n.NodeType,
			PropertiesHash:  n.PropertiesHash,
			ValidationState: state,
			Metadata:        n.Metadata,
		}
	}

	if err := s.dag.SyncFeatureTree(ctx, *job.ItemID, req.RevisionNumber, nodes, nil); err != nil {
		s.logger.Error().Err(err).Msg("failed to sync DAG from runner")
		writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG")
		return
	}

	// Build key→ID map and sync edges.
	// NOTE(review): this assumes SyncFeatureTree populates the ID field of
	// the nodes slice in place — confirm, otherwise keyToID maps to empty
	// IDs and every edge below is silently skipped.
	keyToID := make(map[string]string, len(nodes))
	for _, n := range nodes {
		keyToID[n.NodeKey] = n.ID
	}

	if len(req.Edges) > 0 {
		// Full replacement: drop all existing edges for this revision,
		// then recreate from the request.
		if err := s.dag.DeleteEdgesForItem(ctx, *job.ItemID, req.RevisionNumber); err != nil {
			s.logger.Error().Err(err).Msg("failed to delete old edges")
			writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG edges")
			return
		}
		for _, e := range req.Edges {
			// Edges referencing unknown node keys are skipped, not errors.
			sourceID, ok := keyToID[e.SourceKey]
			if !ok {
				continue
			}
			targetID, ok := keyToID[e.TargetKey]
			if !ok {
				continue
			}
			edgeType := e.EdgeType
			if edgeType == "" {
				edgeType = "depends_on"
			}
			edge := &db.DAGEdge{
				SourceNodeID: sourceID,
				TargetNodeID: targetID,
				EdgeType:     edgeType,
				Metadata:     e.Metadata,
			}
			// Individual edge failures are logged but do not abort the sync.
			if err := s.dag.CreateEdge(ctx, edge); err != nil {
				s.logger.Error().Err(err).Msg("failed to create edge from runner")
			}
		}
	}

	s.broker.Publish("dag.updated", mustMarshal(map[string]any{
		"item_id":    *job.ItemID,
		"job_id":     jobID,
		"runner":     runner.Name,
		"node_count": len(req.Nodes),
		"edge_count": len(req.Edges),
	}))

	writeJSON(w, http.StatusOK, map[string]any{
		"synced":     true,
		"node_count": len(req.Nodes),
		"edge_count": len(req.Edges),
	})
}
|
||||
|
||||
// generateRunnerToken creates a new runner token. Returns raw token, hash, and prefix.
// The raw token is shown to the operator exactly once; only the SHA-256 hex
// digest is stored. The prefix ("silo_runner_" plus the first 8 hex chars)
// identifies the token in listings without revealing it.
func generateRunnerToken() (raw, hash, prefix string) {
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		// crypto/rand failing means the host is unusable for key material.
		panic(fmt.Sprintf("generating random bytes: %v", err))
	}

	raw = "silo_runner_" + hex.EncodeToString(secret)
	digest := sha256.Sum256([]byte(raw))
	hash = hex.EncodeToString(digest[:])
	prefix = raw[:20] // "silo_runner_" + first 8 hex chars

	return raw, hash, prefix
}
|
||||
|
||||
// loadAndUpsertJobDefs loads YAML definitions from a directory and upserts them into the database.
|
||||
func loadAndUpsertJobDefs(ctx context.Context, dir string, repo *db.JobRepository) (map[string]*jobdef.Definition, error) {
|
||||
defs, err := jobdef.LoadAll(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading job definitions: %w", err)
|
||||
}
|
||||
|
||||
for _, def := range defs {
|
||||
defJSON, _ := json.Marshal(def)
|
||||
var defMap map[string]any
|
||||
json.Unmarshal(defJSON, &defMap)
|
||||
|
||||
rec := &db.JobDefinitionRecord{
|
||||
Name: def.Name,
|
||||
Version: def.Version,
|
||||
TriggerType: def.Trigger.Type,
|
||||
ScopeType: def.Scope.Type,
|
||||
ComputeType: def.Compute.Type,
|
||||
RunnerTags: def.Runner.Tags,
|
||||
TimeoutSeconds: def.Timeout,
|
||||
MaxRetries: def.MaxRetries,
|
||||
Priority: def.Priority,
|
||||
Definition: defMap,
|
||||
Enabled: true,
|
||||
}
|
||||
if err := repo.UpsertDefinition(ctx, rec); err != nil {
|
||||
return nil, fmt.Errorf("upserting definition %s: %w", def.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return defs, nil
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
package auth
|
||||
|
||||
import "context"
|
||||
|
||||
const runnerContextKey contextKey = iota + 1
|
||||
|
||||
// RunnerIdentity represents an authenticated runner in the request context.
// It is injected by the runner-auth middleware after token validation and
// read back by runner-facing handlers via RunnerFromContext.
type RunnerIdentity struct {
	ID   string   // runner's database ID
	Name string   // human-readable runner name
	Tags []string // capability tags used to match claimable jobs
}
|
||||
|
||||
// RunnerFromContext extracts the authenticated runner from the request context.
|
||||
// Returns nil if no runner is present.
|
||||
func RunnerFromContext(ctx context.Context) *RunnerIdentity {
|
||||
r, _ := ctx.Value(runnerContextKey).(*RunnerIdentity)
|
||||
return r
|
||||
}
|
||||
|
||||
// ContextWithRunner returns a new context carrying the given runner identity.
// The parent context is not modified; retrieve the identity later with
// RunnerFromContext.
func ContextWithRunner(ctx context.Context, r *RunnerIdentity) context.Context {
	return context.WithValue(ctx, runnerContextKey, r)
}
|
||||
@@ -17,7 +17,6 @@ type Config struct {
|
||||
FreeCAD FreeCADConfig `yaml:"freecad"`
|
||||
Odoo OdooConfig `yaml:"odoo"`
|
||||
Auth AuthConfig `yaml:"auth"`
|
||||
Jobs JobsConfig `yaml:"jobs"`
|
||||
}
|
||||
|
||||
// AuthConfig holds authentication and authorization settings.
|
||||
@@ -112,14 +111,6 @@ type FreeCADConfig struct {
|
||||
Executable string `yaml:"executable"`
|
||||
}
|
||||
|
||||
// JobsConfig holds worker/runner system settings. Zero values are replaced
// with the defaults noted per field when the config is loaded.
type JobsConfig struct {
	Directory       string `yaml:"directory"`         // job-definition YAML directory; default /etc/silo/jobdefs
	RunnerTimeout   int    `yaml:"runner_timeout"`    // seconds, default 90
	JobTimeoutCheck int    `yaml:"job_timeout_check"` // seconds, default 30
	DefaultPriority int    `yaml:"default_priority"`  // default 100
}
|
||||
|
||||
// OdooConfig holds Odoo ERP integration settings.
|
||||
type OdooConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
@@ -166,18 +157,6 @@ func Load(path string) (*Config, error) {
|
||||
if cfg.FreeCAD.URIScheme == "" {
|
||||
cfg.FreeCAD.URIScheme = "silo"
|
||||
}
|
||||
if cfg.Jobs.Directory == "" {
|
||||
cfg.Jobs.Directory = "/etc/silo/jobdefs"
|
||||
}
|
||||
if cfg.Jobs.RunnerTimeout == 0 {
|
||||
cfg.Jobs.RunnerTimeout = 90
|
||||
}
|
||||
if cfg.Jobs.JobTimeoutCheck == 0 {
|
||||
cfg.Jobs.JobTimeoutCheck = 30
|
||||
}
|
||||
if cfg.Jobs.DefaultPriority == 0 {
|
||||
cfg.Jobs.DefaultPriority = 100
|
||||
}
|
||||
|
||||
// Override with environment variables
|
||||
if v := os.Getenv("SILO_DB_HOST"); v != "" {
|
||||
|
||||
@@ -1,520 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
// DAGNode represents a feature-level node in the dependency graph.
// A node is uniquely identified by (ItemID, RevisionNumber, NodeKey);
// upserts conflict on that triple.
type DAGNode struct {
	ID              string         // database primary key (assigned on insert)
	ItemID          string         // owning item
	RevisionNumber  int            // item revision this node belongs to
	NodeKey         string         // stable key unique within an item revision
	NodeType        string         // kind of node (semantics defined by callers)
	PropertiesHash  *string        // optional hash of node properties; nil when unknown
	ValidationState string         // validation status (e.g. "clean" — see handler defaulting)
	ValidationMsg   *string        // optional human-readable validation detail
	Metadata        map[string]any // free-form JSON metadata
	CreatedAt       time.Time      // set by the database on insert
	UpdatedAt       time.Time      // bumped by the database on update
}
|
||||
|
||||
// DAGEdge represents a dependency between two nodes within the same item.
type DAGEdge struct {
	ID           string         // database primary key
	SourceNodeID string         // ID of the depending node
	TargetNodeID string         // ID of the node depended upon
	EdgeType     string         // relationship kind (e.g. "depends_on" — see handler defaulting)
	Metadata     map[string]any // free-form JSON metadata
}
|
||||
|
||||
// DAGCrossEdge represents a dependency between nodes in different items.
type DAGCrossEdge struct {
	ID             string         // database primary key
	SourceNodeID   string         // ID of the depending node
	TargetNodeID   string         // ID of the node depended upon (in another item)
	RelationshipID *string        // optional link to the item-level relationship record
	EdgeType       string         // relationship kind
	Metadata       map[string]any // free-form JSON metadata
}
|
||||
|
||||
// DAGRepository provides dependency graph database operations.
// All queries go through the wrapped *DB's connection pool.
type DAGRepository struct {
	db *DB // shared database handle; not owned by this repository
}
|
||||
|
||||
// NewDAGRepository creates a new DAG repository.
|
||||
func NewDAGRepository(db *DB) *DAGRepository {
|
||||
return &DAGRepository{db: db}
|
||||
}
|
||||
|
||||
// GetNodes returns all DAG nodes for an item at a specific revision,
// ordered by node_key. Returns an empty slice when none exist.
// Row scanning is delegated to scanDAGNodes.
func (r *DAGRepository) GetNodes(ctx context.Context, itemID string, revisionNumber int) ([]*DAGNode, error) {
	rows, err := r.db.pool.Query(ctx, `
		SELECT id, item_id, revision_number, node_key, node_type,
		       properties_hash, validation_state, validation_msg,
		       metadata, created_at, updated_at
		FROM dag_nodes
		WHERE item_id = $1 AND revision_number = $2
		ORDER BY node_key
	`, itemID, revisionNumber)
	if err != nil {
		return nil, fmt.Errorf("querying DAG nodes: %w", err)
	}
	defer rows.Close()
	return scanDAGNodes(rows)
}
|
||||
|
||||
// GetNodeByKey returns a single DAG node by item, revision, and key.
// Returns (nil, nil) when no such node exists, so callers must nil-check
// before dereferencing.
func (r *DAGRepository) GetNodeByKey(ctx context.Context, itemID string, revisionNumber int, nodeKey string) (*DAGNode, error) {
	n := &DAGNode{}
	var metadataJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, item_id, revision_number, node_key, node_type,
		       properties_hash, validation_state, validation_msg,
		       metadata, created_at, updated_at
		FROM dag_nodes
		WHERE item_id = $1 AND revision_number = $2 AND node_key = $3
	`, itemID, revisionNumber, nodeKey).Scan(
		&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
		&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
		&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
	)
	// NOTE(review): direct == comparison relies on pgx returning ErrNoRows
	// unwrapped from QueryRow.Scan; errors.Is would also tolerate wrapping.
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying DAG node: %w", err)
	}
	// metadata is nullable in the database; only decode when present.
	if metadataJSON != nil {
		if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
		}
	}
	return n, nil
}
|
||||
|
||||
// GetNodeByID returns a single DAG node by its ID.
// Returns (nil, nil) when no such node exists, so callers must nil-check
// before dereferencing.
func (r *DAGRepository) GetNodeByID(ctx context.Context, nodeID string) (*DAGNode, error) {
	n := &DAGNode{}
	var metadataJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, item_id, revision_number, node_key, node_type,
		       properties_hash, validation_state, validation_msg,
		       metadata, created_at, updated_at
		FROM dag_nodes
		WHERE id = $1
	`, nodeID).Scan(
		&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
		&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
		&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
	)
	// NOTE(review): direct == comparison relies on pgx returning ErrNoRows
	// unwrapped from QueryRow.Scan; errors.Is would also tolerate wrapping.
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying DAG node by ID: %w", err)
	}
	// metadata is nullable in the database; only decode when present.
	if metadataJSON != nil {
		if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
		}
	}
	return n, nil
}
|
||||
|
||||
// UpsertNode inserts or updates a single DAG node, keyed on
// (item_id, revision_number, node_key). On conflict the node's type,
// hash, validation state/message, and metadata are overwritten and
// updated_at is refreshed. The generated ID and timestamps are written
// back into n.
func (r *DAGRepository) UpsertNode(ctx context.Context, n *DAGNode) error {
	metadataJSON, err := json.Marshal(n.Metadata)
	if err != nil {
		return fmt.Errorf("marshaling metadata: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO dag_nodes (item_id, revision_number, node_key, node_type,
		                       properties_hash, validation_state, validation_msg, metadata)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
		ON CONFLICT (item_id, revision_number, node_key)
		DO UPDATE SET
			node_type = EXCLUDED.node_type,
			properties_hash = EXCLUDED.properties_hash,
			validation_state = EXCLUDED.validation_state,
			validation_msg = EXCLUDED.validation_msg,
			metadata = EXCLUDED.metadata,
			updated_at = now()
		RETURNING id, created_at, updated_at
	`, n.ItemID, n.RevisionNumber, n.NodeKey, n.NodeType,
		n.PropertiesHash, n.ValidationState, n.ValidationMsg, metadataJSON,
	).Scan(&n.ID, &n.CreatedAt, &n.UpdatedAt)
	if err != nil {
		return fmt.Errorf("upserting DAG node: %w", err)
	}
	return nil
}
|
||||
|
||||
// GetEdges returns all edges for nodes belonging to an item at a specific revision.
|
||||
func (r *DAGRepository) GetEdges(ctx context.Context, itemID string, revisionNumber int) ([]*DAGEdge, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT e.id, e.source_node_id, e.target_node_id, e.edge_type, e.metadata
|
||||
FROM dag_edges e
|
||||
JOIN dag_nodes src ON src.id = e.source_node_id
|
||||
WHERE src.item_id = $1 AND src.revision_number = $2
|
||||
ORDER BY e.source_node_id, e.target_node_id
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying DAG edges: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var edges []*DAGEdge
|
||||
for rows.Next() {
|
||||
e := &DAGEdge{}
|
||||
var metadataJSON []byte
|
||||
if err := rows.Scan(&e.ID, &e.SourceNodeID, &e.TargetNodeID, &e.EdgeType, &metadataJSON); err != nil {
|
||||
return nil, fmt.Errorf("scanning DAG edge: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &e.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling edge metadata: %w", err)
|
||||
}
|
||||
}
|
||||
edges = append(edges, e)
|
||||
}
|
||||
return edges, rows.Err()
|
||||
}
|
||||
|
||||
// CreateEdge inserts a new edge between two nodes. An empty EdgeType
// defaults to "depends_on". If an identical edge already exists the
// call is a silent no-op (e.ID is left unchanged in that case).
func (r *DAGRepository) CreateEdge(ctx context.Context, e *DAGEdge) error {
	if e.EdgeType == "" {
		e.EdgeType = "depends_on"
	}
	metadataJSON, err := json.Marshal(e.Metadata)
	if err != nil {
		return fmt.Errorf("marshaling edge metadata: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO dag_edges (source_node_id, target_node_id, edge_type, metadata)
		VALUES ($1, $2, $3, $4)
		ON CONFLICT (source_node_id, target_node_id, edge_type) DO NOTHING
		RETURNING id
	`, e.SourceNodeID, e.TargetNodeID, e.EdgeType, metadataJSON).Scan(&e.ID)
	// DO NOTHING suppresses the RETURNING row, which surfaces here as
	// ErrNoRows — treated as "already exists", not a failure.
	if err == pgx.ErrNoRows {
		// Edge already exists, not an error
		return nil
	}
	if err != nil {
		return fmt.Errorf("creating DAG edge: %w", err)
	}
	return nil
}
|
||||
|
||||
// DeleteEdgesForItem removes all edges for nodes belonging to an item/revision.
|
||||
func (r *DAGRepository) DeleteEdgesForItem(ctx context.Context, itemID string, revisionNumber int) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
DELETE FROM dag_edges
|
||||
WHERE source_node_id IN (
|
||||
SELECT id FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
)
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting edges for item: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetForwardCone returns all downstream dependent nodes reachable from the
// given node via edges. This is the key query for interference detection.
// The starting node itself is not included in the result. UNION (not
// UNION ALL) deduplicates and lets the recursion terminate on diamonds.
func (r *DAGRepository) GetForwardCone(ctx context.Context, nodeID string) ([]*DAGNode, error) {
	rows, err := r.db.pool.Query(ctx, `
		WITH RECURSIVE forward_cone AS (
			SELECT target_node_id AS node_id
			FROM dag_edges
			WHERE source_node_id = $1
			UNION
			SELECT e.target_node_id
			FROM dag_edges e
			JOIN forward_cone fc ON fc.node_id = e.source_node_id
		)
		SELECT n.id, n.item_id, n.revision_number, n.node_key, n.node_type,
		       n.properties_hash, n.validation_state, n.validation_msg,
		       n.metadata, n.created_at, n.updated_at
		FROM dag_nodes n
		JOIN forward_cone fc ON n.id = fc.node_id
		ORDER BY n.node_key
	`, nodeID)
	if err != nil {
		return nil, fmt.Errorf("querying forward cone: %w", err)
	}
	defer rows.Close()
	return scanDAGNodes(rows)
}
|
||||
|
||||
// GetBackwardCone returns all upstream dependency nodes that the given
// node depends on.
// Mirror image of GetForwardCone: it follows edges from target back to
// source. The starting node itself is excluded from the result.
func (r *DAGRepository) GetBackwardCone(ctx context.Context, nodeID string) ([]*DAGNode, error) {
	rows, err := r.db.pool.Query(ctx, `
		WITH RECURSIVE backward_cone AS (
			SELECT source_node_id AS node_id
			FROM dag_edges
			WHERE target_node_id = $1
			UNION
			SELECT e.source_node_id
			FROM dag_edges e
			JOIN backward_cone bc ON bc.node_id = e.target_node_id
		)
		SELECT n.id, n.item_id, n.revision_number, n.node_key, n.node_type,
		       n.properties_hash, n.validation_state, n.validation_msg,
		       n.metadata, n.created_at, n.updated_at
		FROM dag_nodes n
		JOIN backward_cone bc ON n.id = bc.node_id
		ORDER BY n.node_key
	`, nodeID)
	if err != nil {
		return nil, fmt.Errorf("querying backward cone: %w", err)
	}
	defer rows.Close()
	return scanDAGNodes(rows)
}
|
||||
|
||||
// GetDirtySubgraph returns all non-clean nodes for an item.
|
||||
func (r *DAGRepository) GetDirtySubgraph(ctx context.Context, itemID string) ([]*DAGNode, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg,
|
||||
metadata, created_at, updated_at
|
||||
FROM dag_nodes
|
||||
WHERE item_id = $1 AND validation_state != 'clean'
|
||||
ORDER BY node_key
|
||||
`, itemID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying dirty subgraph: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanDAGNodes(rows)
|
||||
}
|
||||
|
||||
// MarkDirty marks a node and all its downstream dependents as dirty.
// Only nodes currently 'clean' are flipped (nodes already dirty,
// validating, or failed are left alone). Returns the number of nodes
// that were transitioned.
func (r *DAGRepository) MarkDirty(ctx context.Context, nodeID string) (int64, error) {
	result, err := r.db.pool.Exec(ctx, `
		WITH RECURSIVE forward_cone AS (
			SELECT $1::uuid AS node_id
			UNION
			SELECT e.target_node_id
			FROM dag_edges e
			JOIN forward_cone fc ON fc.node_id = e.source_node_id
		)
		UPDATE dag_nodes SET validation_state = 'dirty', updated_at = now()
		WHERE id IN (SELECT node_id FROM forward_cone)
		  AND validation_state = 'clean'
	`, nodeID)
	if err != nil {
		return 0, fmt.Errorf("marking dirty: %w", err)
	}
	return result.RowsAffected(), nil
}
|
||||
|
||||
// MarkValidating sets a node's state to 'validating'.
|
||||
func (r *DAGRepository) MarkValidating(ctx context.Context, nodeID string) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE dag_nodes SET validation_state = 'validating', updated_at = now()
|
||||
WHERE id = $1
|
||||
`, nodeID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marking validating: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkClean sets a node's state to 'clean' and updates its properties hash.
// Any previous validation message is cleared. Marking a nonexistent
// node is silently a no-op.
func (r *DAGRepository) MarkClean(ctx context.Context, nodeID string, propertiesHash string) error {
	_, err := r.db.pool.Exec(ctx, `
		UPDATE dag_nodes
		SET validation_state = 'clean',
		    properties_hash = $2,
		    validation_msg = NULL,
		    updated_at = now()
		WHERE id = $1
	`, nodeID, propertiesHash)
	if err != nil {
		return fmt.Errorf("marking clean: %w", err)
	}
	return nil
}
|
||||
|
||||
// MarkFailed sets a node's state to 'failed' with an error message.
|
||||
func (r *DAGRepository) MarkFailed(ctx context.Context, nodeID string, message string) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE dag_nodes
|
||||
SET validation_state = 'failed',
|
||||
validation_msg = $2,
|
||||
updated_at = now()
|
||||
WHERE id = $1
|
||||
`, nodeID, message)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marking failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasCycle checks whether adding an edge from sourceID to targetID would
// create a cycle. It walks upward from sourceID to see if targetID is
// already an ancestor.
// Rationale: a cycle appears exactly when a path targetID -> ... ->
// sourceID already exists, i.e. targetID is among sourceID's ancestors.
// A self-edge (sourceID == targetID) is always a cycle.
func (r *DAGRepository) HasCycle(ctx context.Context, sourceID, targetID string) (bool, error) {
	if sourceID == targetID {
		return true, nil
	}
	var hasCycle bool
	err := r.db.pool.QueryRow(ctx, `
		WITH RECURSIVE ancestors AS (
			SELECT source_node_id AS node_id
			FROM dag_edges
			WHERE target_node_id = $1
			UNION
			SELECT e.source_node_id
			FROM dag_edges e
			JOIN ancestors a ON a.node_id = e.target_node_id
		)
		SELECT EXISTS (
			SELECT 1 FROM ancestors WHERE node_id = $2
		)
	`, sourceID, targetID).Scan(&hasCycle)
	if err != nil {
		return false, fmt.Errorf("checking for cycle: %w", err)
	}
	return hasCycle, nil
}
|
||||
|
||||
// SyncFeatureTree replaces the entire feature DAG for an item/revision
|
||||
// within a single transaction. It upserts nodes, replaces edges, and
|
||||
// marks changed nodes dirty.
|
||||
func (r *DAGRepository) SyncFeatureTree(ctx context.Context, itemID string, revisionNumber int, nodes []DAGNode, edges []DAGEdge) error {
|
||||
tx, err := r.db.pool.Begin(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("beginning transaction: %w", err)
|
||||
}
|
||||
defer tx.Rollback(ctx)
|
||||
|
||||
// Upsert all nodes
|
||||
for i := range nodes {
|
||||
n := &nodes[i]
|
||||
n.ItemID = itemID
|
||||
n.RevisionNumber = revisionNumber
|
||||
if n.ValidationState == "" {
|
||||
n.ValidationState = "clean"
|
||||
}
|
||||
|
||||
metadataJSON, err := json.Marshal(n.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling node metadata: %w", err)
|
||||
}
|
||||
|
||||
err = tx.QueryRow(ctx, `
|
||||
INSERT INTO dag_nodes (item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg, metadata)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
ON CONFLICT (item_id, revision_number, node_key)
|
||||
DO UPDATE SET
|
||||
node_type = EXCLUDED.node_type,
|
||||
properties_hash = EXCLUDED.properties_hash,
|
||||
metadata = EXCLUDED.metadata,
|
||||
updated_at = now()
|
||||
RETURNING id, created_at, updated_at
|
||||
`, n.ItemID, n.RevisionNumber, n.NodeKey, n.NodeType,
|
||||
n.PropertiesHash, n.ValidationState, n.ValidationMsg, metadataJSON,
|
||||
).Scan(&n.ID, &n.CreatedAt, &n.UpdatedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("upserting node %s: %w", n.NodeKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build key→ID map for edge resolution
|
||||
keyToID := make(map[string]string, len(nodes))
|
||||
for _, n := range nodes {
|
||||
keyToID[n.NodeKey] = n.ID
|
||||
}
|
||||
|
||||
// Delete existing edges for this item/revision
|
||||
_, err = tx.Exec(ctx, `
|
||||
DELETE FROM dag_edges
|
||||
WHERE source_node_id IN (
|
||||
SELECT id FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
)
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting old edges: %w", err)
|
||||
}
|
||||
|
||||
// Insert new edges
|
||||
for i := range edges {
|
||||
e := &edges[i]
|
||||
if e.EdgeType == "" {
|
||||
e.EdgeType = "depends_on"
|
||||
}
|
||||
|
||||
// Resolve source/target from node keys if IDs are not set
|
||||
sourceID := e.SourceNodeID
|
||||
targetID := e.TargetNodeID
|
||||
if sourceID == "" {
|
||||
return fmt.Errorf("edge %d: source_node_id is required", i)
|
||||
}
|
||||
if targetID == "" {
|
||||
return fmt.Errorf("edge %d: target_node_id is required", i)
|
||||
}
|
||||
|
||||
metadataJSON, err := json.Marshal(e.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling edge metadata: %w", err)
|
||||
}
|
||||
|
||||
err = tx.QueryRow(ctx, `
|
||||
INSERT INTO dag_edges (source_node_id, target_node_id, edge_type, metadata)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
RETURNING id
|
||||
`, sourceID, targetID, e.EdgeType, metadataJSON).Scan(&e.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating edge: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit(ctx)
|
||||
}
|
||||
|
||||
// DeleteNodesForItem removes all DAG nodes (and cascades to edges) for an item/revision.
|
||||
func (r *DAGRepository) DeleteNodesForItem(ctx context.Context, itemID string, revisionNumber int) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
DELETE FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting nodes for item: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func scanDAGNodes(rows pgx.Rows) ([]*DAGNode, error) {
|
||||
var nodes []*DAGNode
|
||||
for rows.Next() {
|
||||
n := &DAGNode{}
|
||||
var metadataJSON []byte
|
||||
err := rows.Scan(
|
||||
&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
|
||||
&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
|
||||
&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning DAG node: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
|
||||
}
|
||||
}
|
||||
nodes = append(nodes, n)
|
||||
}
|
||||
return nodes, rows.Err()
|
||||
}
|
||||
@@ -1,759 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
// Runner represents a registered compute worker.
type Runner struct {
	ID          string
	Name        string
	TokenHash   string     // hash of the runner's auth token; the raw token is never stored
	TokenPrefix string     // short token prefix kept for display/lookup
	Tags        []string   // capability tags matched against job runner_tags
	Status      string
	LastHeartbeat *time.Time // nil until the runner first reports in
	LastJobID     *string    // nil when the runner has not run a job
	Metadata      map[string]any
	CreatedAt     time.Time
	UpdatedAt     time.Time
}

// JobDefinitionRecord is a job definition stored in the database.
type JobDefinitionRecord struct {
	ID             string
	Name           string // unique; upserts key on this
	Version        int
	TriggerType    string
	ScopeType      string
	ComputeType    string
	RunnerTags     []string // tags a runner must have to claim jobs of this definition
	TimeoutSeconds int
	MaxRetries     int
	Priority       int // lower value = higher priority (queries order ASC)
	Definition     map[string]any
	Enabled        bool
	CreatedAt      time.Time
	UpdatedAt      time.Time
}

// Job represents a single compute job instance.
// Pointer fields are nullable columns: nil until the corresponding
// lifecycle event (claim, start, completion, cancellation) occurs.
type Job struct {
	ID              string
	JobDefinitionID *string
	DefinitionName  string
	Status          string // pending -> claimed -> running -> completed/failed/cancelled
	Priority        int
	ItemID          *string
	ProjectID       *string
	ScopeMetadata   map[string]any
	RunnerID        *string
	RunnerTags      []string
	CreatedAt       time.Time
	ClaimedAt       *time.Time
	StartedAt       *time.Time
	CompletedAt     *time.Time
	TimeoutSeconds  int
	ExpiresAt       *time.Time // claim deadline; set at claim time from TimeoutSeconds
	Progress        int        // 0-100
	ProgressMessage *string
	Result          map[string]any
	ErrorMessage    *string
	RetryCount      int
	MaxRetries      int
	CreatedBy       *string
	CancelledBy     *string
}

// JobLogEntry is a single log line for a job.
type JobLogEntry struct {
	ID        string
	JobID     string
	Timestamp time.Time
	Level     string
	Message   string
	Metadata  map[string]any
}

// JobRepository provides job and runner database operations.
type JobRepository struct {
	db *DB
}

// NewJobRepository creates a new job repository.
func NewJobRepository(db *DB) *JobRepository {
	return &JobRepository{db: db}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Job Definitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// UpsertDefinition inserts or updates a job definition record, keyed on
// the unique definition name. All mutable columns are overwritten on
// conflict; the generated ID and timestamps are written back into d.
func (r *JobRepository) UpsertDefinition(ctx context.Context, d *JobDefinitionRecord) error {
	defJSON, err := json.Marshal(d.Definition)
	if err != nil {
		return fmt.Errorf("marshaling definition: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO job_definitions (name, version, trigger_type, scope_type, compute_type,
		                             runner_tags, timeout_seconds, max_retries, priority,
		                             definition, enabled)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
		ON CONFLICT (name) DO UPDATE SET
			version = EXCLUDED.version,
			trigger_type = EXCLUDED.trigger_type,
			scope_type = EXCLUDED.scope_type,
			compute_type = EXCLUDED.compute_type,
			runner_tags = EXCLUDED.runner_tags,
			timeout_seconds = EXCLUDED.timeout_seconds,
			max_retries = EXCLUDED.max_retries,
			priority = EXCLUDED.priority,
			definition = EXCLUDED.definition,
			enabled = EXCLUDED.enabled,
			updated_at = now()
		RETURNING id, created_at, updated_at
	`, d.Name, d.Version, d.TriggerType, d.ScopeType, d.ComputeType,
		d.RunnerTags, d.TimeoutSeconds, d.MaxRetries, d.Priority,
		defJSON, d.Enabled,
	).Scan(&d.ID, &d.CreatedAt, &d.UpdatedAt)
	if err != nil {
		return fmt.Errorf("upserting job definition: %w", err)
	}
	return nil
}
|
||||
|
||||
// GetDefinition returns a job definition by name.
// Returns (nil, nil) when no definition with that name exists.
func (r *JobRepository) GetDefinition(ctx context.Context, name string) (*JobDefinitionRecord, error) {
	d := &JobDefinitionRecord{}
	// definition column is JSON; scanned raw and decoded below.
	var defJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, version, trigger_type, scope_type, compute_type,
		       runner_tags, timeout_seconds, max_retries, priority,
		       definition, enabled, created_at, updated_at
		FROM job_definitions WHERE name = $1
	`, name).Scan(
		&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
		&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
		&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
	)
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying job definition: %w", err)
	}
	if defJSON != nil {
		if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
			return nil, fmt.Errorf("unmarshaling definition: %w", err)
		}
	}
	return d, nil
}
|
||||
|
||||
// ListDefinitions returns all job definitions.
|
||||
func (r *JobRepository) ListDefinitions(ctx context.Context) ([]*JobDefinitionRecord, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions ORDER BY name
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying job definitions: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanJobDefinitions(rows)
|
||||
}
|
||||
|
||||
// GetDefinitionsByTrigger returns all enabled definitions matching a trigger type.
|
||||
func (r *JobRepository) GetDefinitionsByTrigger(ctx context.Context, triggerType string) ([]*JobDefinitionRecord, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions
|
||||
WHERE trigger_type = $1 AND enabled = true
|
||||
ORDER BY priority ASC, name
|
||||
`, triggerType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying definitions by trigger: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanJobDefinitions(rows)
|
||||
}
|
||||
|
||||
// GetDefinitionByID returns a job definition by ID.
// Returns (nil, nil) when no definition with that ID exists.
func (r *JobRepository) GetDefinitionByID(ctx context.Context, id string) (*JobDefinitionRecord, error) {
	d := &JobDefinitionRecord{}
	// definition column is JSON; scanned raw and decoded below.
	var defJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, version, trigger_type, scope_type, compute_type,
		       runner_tags, timeout_seconds, max_retries, priority,
		       definition, enabled, created_at, updated_at
		FROM job_definitions WHERE id = $1
	`, id).Scan(
		&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
		&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
		&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
	)
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying job definition by ID: %w", err)
	}
	if defJSON != nil {
		if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
			return nil, fmt.Errorf("unmarshaling definition: %w", err)
		}
	}
	return d, nil
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// CreateJob inserts a new job.
// The job always starts in 'pending' status regardless of j.Status;
// the generated ID and created_at are written back into j.
func (r *JobRepository) CreateJob(ctx context.Context, j *Job) error {
	scopeJSON, err := json.Marshal(j.ScopeMetadata)
	if err != nil {
		return fmt.Errorf("marshaling scope metadata: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO jobs (job_definition_id, definition_name, status, priority,
		                  item_id, project_id, scope_metadata,
		                  runner_tags, timeout_seconds, max_retries, created_by)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
		RETURNING id, created_at
	`, j.JobDefinitionID, j.DefinitionName, "pending", j.Priority,
		j.ItemID, j.ProjectID, scopeJSON,
		j.RunnerTags, j.TimeoutSeconds, j.MaxRetries, j.CreatedBy,
	).Scan(&j.ID, &j.CreatedAt)
	if err != nil {
		return fmt.Errorf("creating job: %w", err)
	}
	// Keep the in-memory struct consistent with what was inserted.
	j.Status = "pending"
	return nil
}
|
||||
|
||||
// GetJob returns a job by ID.
// Returns (nil, nil) when no job with that ID exists.
func (r *JobRepository) GetJob(ctx context.Context, jobID string) (*Job, error) {
	j := &Job{}
	// scope_metadata and result are JSON columns; scanned raw and
	// decoded below (both are nullable).
	var scopeJSON, resultJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, job_definition_id, definition_name, status, priority,
		       item_id, project_id, scope_metadata, runner_id, runner_tags,
		       created_at, claimed_at, started_at, completed_at,
		       timeout_seconds, expires_at, progress, progress_message,
		       result, error_message, retry_count, max_retries,
		       created_by, cancelled_by
		FROM jobs WHERE id = $1
	`, jobID).Scan(
		&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status, &j.Priority,
		&j.ItemID, &j.ProjectID, &scopeJSON, &j.RunnerID, &j.RunnerTags,
		&j.CreatedAt, &j.ClaimedAt, &j.StartedAt, &j.CompletedAt,
		&j.TimeoutSeconds, &j.ExpiresAt, &j.Progress, &j.ProgressMessage,
		&resultJSON, &j.ErrorMessage, &j.RetryCount, &j.MaxRetries,
		&j.CreatedBy, &j.CancelledBy,
	)
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying job: %w", err)
	}
	if scopeJSON != nil {
		if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
			return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
		}
	}
	if resultJSON != nil {
		if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
			return nil, fmt.Errorf("unmarshaling result: %w", err)
		}
	}
	return j, nil
}
|
||||
|
||||
// ListJobs returns jobs matching optional filters.
// Empty status/itemID and zero limit/offset mean "no filter"; results
// are newest-first. The SQL is assembled dynamically but every user
// value is passed as a positional parameter, never interpolated.
func (r *JobRepository) ListJobs(ctx context.Context, status, itemID string, limit, offset int) ([]*Job, error) {
	query := `
		SELECT id, job_definition_id, definition_name, status, priority,
		       item_id, project_id, scope_metadata, runner_id, runner_tags,
		       created_at, claimed_at, started_at, completed_at,
		       timeout_seconds, expires_at, progress, progress_message,
		       result, error_message, retry_count, max_retries,
		       created_by, cancelled_by
		FROM jobs WHERE 1=1`
	args := []any{}
	// argN tracks the next $n placeholder so clauses can be appended
	// in any combination.
	argN := 1

	if status != "" {
		query += fmt.Sprintf(" AND status = $%d", argN)
		args = append(args, status)
		argN++
	}
	if itemID != "" {
		query += fmt.Sprintf(" AND item_id = $%d", argN)
		args = append(args, itemID)
		argN++
	}

	query += " ORDER BY created_at DESC"

	if limit > 0 {
		query += fmt.Sprintf(" LIMIT $%d", argN)
		args = append(args, limit)
		argN++
	}
	if offset > 0 {
		query += fmt.Sprintf(" OFFSET $%d", argN)
		args = append(args, offset)
	}

	rows, err := r.db.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("querying jobs: %w", err)
	}
	defer rows.Close()
	return scanJobs(rows)
}
|
||||
|
||||
// ClaimJob atomically claims the next available job matching the runner's tags.
// Uses SELECT FOR UPDATE SKIP LOCKED for exactly-once delivery.
// A job is claimable when its runner_tags are a subset (<@) of the
// runner's tags. The highest-priority (lowest number), oldest pending
// job wins; expires_at is set from the job's own timeout_seconds.
// Returns (nil, nil) when no claimable job exists.
func (r *JobRepository) ClaimJob(ctx context.Context, runnerID string, tags []string) (*Job, error) {
	j := &Job{}
	var scopeJSON, resultJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		WITH claimable AS (
			SELECT id FROM jobs
			WHERE status = 'pending' AND runner_tags <@ $2::text[]
			ORDER BY priority ASC, created_at ASC
			LIMIT 1
			FOR UPDATE SKIP LOCKED
		)
		UPDATE jobs SET
			status = 'claimed',
			runner_id = $1,
			claimed_at = now(),
			expires_at = now() + (timeout_seconds || ' seconds')::interval
		FROM claimable
		WHERE jobs.id = claimable.id
		RETURNING jobs.id, jobs.job_definition_id, jobs.definition_name, jobs.status,
		          jobs.priority, jobs.item_id, jobs.project_id, jobs.scope_metadata,
		          jobs.runner_id, jobs.runner_tags, jobs.created_at, jobs.claimed_at,
		          jobs.started_at, jobs.completed_at, jobs.timeout_seconds, jobs.expires_at,
		          jobs.progress, jobs.progress_message, jobs.result, jobs.error_message,
		          jobs.retry_count, jobs.max_retries, jobs.created_by, jobs.cancelled_by
	`, runnerID, tags).Scan(
		&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status,
		&j.Priority, &j.ItemID, &j.ProjectID, &scopeJSON,
		&j.RunnerID, &j.RunnerTags, &j.CreatedAt, &j.ClaimedAt,
		&j.StartedAt, &j.CompletedAt, &j.TimeoutSeconds, &j.ExpiresAt,
		&j.Progress, &j.ProgressMessage, &resultJSON, &j.ErrorMessage,
		&j.RetryCount, &j.MaxRetries, &j.CreatedBy, &j.CancelledBy,
	)
	if err == pgx.ErrNoRows {
		// Nothing to claim right now; not an error.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("claiming job: %w", err)
	}
	if scopeJSON != nil {
		if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
			return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
		}
	}
	if resultJSON != nil {
		if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
			return nil, fmt.Errorf("unmarshaling result: %w", err)
		}
	}
	return j, nil
}
|
||||
|
||||
// StartJob transitions a claimed job to running.
|
||||
func (r *JobRepository) StartJob(ctx context.Context, jobID, runnerID string) error {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET status = 'running', started_at = now()
|
||||
WHERE id = $1 AND runner_id = $2 AND status = 'claimed'
|
||||
`, jobID, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting job: %w", err)
|
||||
}
|
||||
if result.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not claimable by runner %s or not in claimed state", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProgress updates a running job's progress.
// An empty message stores NULL rather than an empty string. The update
// only applies while the job is claimed/running by this runner;
// otherwise an ownership/state error is returned.
func (r *JobRepository) UpdateProgress(ctx context.Context, jobID, runnerID string, progress int, message string) error {
	var msg *string
	if message != "" {
		msg = &message
	}
	result, err := r.db.pool.Exec(ctx, `
		UPDATE jobs SET progress = $3, progress_message = $4
		WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
	`, jobID, runnerID, progress, msg)
	if err != nil {
		return fmt.Errorf("updating progress: %w", err)
	}
	if result.RowsAffected() == 0 {
		return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
	}
	return nil
}
|
||||
|
||||
// CompleteJob marks a job as completed with optional result data.
// Progress is forced to 100. A nil resultData stores NULL. The update
// only applies while the job is claimed/running by this runner;
// otherwise an ownership/state error is returned.
func (r *JobRepository) CompleteJob(ctx context.Context, jobID, runnerID string, resultData map[string]any) error {
	var resultJSON []byte
	var err error
	if resultData != nil {
		resultJSON, err = json.Marshal(resultData)
		if err != nil {
			return fmt.Errorf("marshaling result: %w", err)
		}
	}

	res, err := r.db.pool.Exec(ctx, `
		UPDATE jobs SET
			status = 'completed',
			progress = 100,
			result = $3,
			completed_at = now()
		WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
	`, jobID, runnerID, resultJSON)
	if err != nil {
		return fmt.Errorf("completing job: %w", err)
	}
	if res.RowsAffected() == 0 {
		return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
	}
	return nil
}
|
||||
|
||||
// FailJob marks a job as failed with an error message.
|
||||
func (r *JobRepository) FailJob(ctx context.Context, jobID, runnerID string, errMsg string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'failed',
|
||||
error_message = $3,
|
||||
completed_at = now()
|
||||
WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
|
||||
`, jobID, runnerID, errMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failing job: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelJob cancels a pending or active job.
|
||||
func (r *JobRepository) CancelJob(ctx context.Context, jobID string, cancelledBy string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'cancelled',
|
||||
cancelled_by = $2,
|
||||
completed_at = now()
|
||||
WHERE id = $1 AND status IN ('pending', 'claimed', 'running')
|
||||
`, jobID, cancelledBy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cancelling job: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not cancellable", jobID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TimeoutExpiredJobs marks expired claimed/running jobs as failed.
|
||||
// Returns the number of jobs timed out.
|
||||
func (r *JobRepository) TimeoutExpiredJobs(ctx context.Context) (int64, error) {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'failed',
|
||||
error_message = 'job timed out',
|
||||
completed_at = now()
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND expires_at IS NOT NULL
|
||||
AND expires_at < now()
|
||||
`)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("timing out expired jobs: %w", err)
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Job Log
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// AppendLog adds a log entry to a job.
|
||||
func (r *JobRepository) AppendLog(ctx context.Context, entry *JobLogEntry) error {
|
||||
metaJSON, err := json.Marshal(entry.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling log metadata: %w", err)
|
||||
}
|
||||
|
||||
err = r.db.pool.QueryRow(ctx, `
|
||||
INSERT INTO job_log (job_id, level, message, metadata)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
RETURNING id, timestamp
|
||||
`, entry.JobID, entry.Level, entry.Message, metaJSON,
|
||||
).Scan(&entry.ID, &entry.Timestamp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("appending job log: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetJobLogs returns all log entries for a job, oldest first.
//
// Scan targets are positional and must stay in the same order as the SELECT
// column list. A NULL metadata column leaves entry.Metadata at its zero
// value; only non-NULL JSONB is unmarshaled.
func (r *JobRepository) GetJobLogs(ctx context.Context, jobID string) ([]*JobLogEntry, error) {
	rows, err := r.db.pool.Query(ctx, `
		SELECT id, job_id, timestamp, level, message, metadata
		FROM job_log WHERE job_id = $1 ORDER BY timestamp ASC
	`, jobID)
	if err != nil {
		return nil, fmt.Errorf("querying job logs: %w", err)
	}
	defer rows.Close()

	var entries []*JobLogEntry
	for rows.Next() {
		e := &JobLogEntry{}
		var metaJSON []byte
		if err := rows.Scan(&e.ID, &e.JobID, &e.Timestamp, &e.Level, &e.Message, &metaJSON); err != nil {
			return nil, fmt.Errorf("scanning job log: %w", err)
		}
		if metaJSON != nil {
			if err := json.Unmarshal(metaJSON, &e.Metadata); err != nil {
				return nil, fmt.Errorf("unmarshaling log metadata: %w", err)
			}
		}
		entries = append(entries, e)
	}
	// rows.Err surfaces any iteration error that ended the loop early.
	return entries, rows.Err()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Runners
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// RegisterRunner creates a new runner record.
|
||||
func (r *JobRepository) RegisterRunner(ctx context.Context, runner *Runner) error {
|
||||
metaJSON, err := json.Marshal(runner.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling runner metadata: %w", err)
|
||||
}
|
||||
|
||||
err = r.db.pool.QueryRow(ctx, `
|
||||
INSERT INTO runners (name, token_hash, token_prefix, tags, status, metadata)
|
||||
VALUES ($1, $2, $3, $4, 'offline', $5)
|
||||
RETURNING id, created_at, updated_at
|
||||
`, runner.Name, runner.TokenHash, runner.TokenPrefix, runner.Tags, metaJSON,
|
||||
).Scan(&runner.ID, &runner.CreatedAt, &runner.UpdatedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("registering runner: %w", err)
|
||||
}
|
||||
runner.Status = "offline"
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRunnerByToken looks up a runner by token hash.
//
// Returns (nil, nil) when no runner matches — callers must check for a nil
// runner rather than relying on an error. Metadata is decoded only when the
// JSONB column is non-NULL.
func (r *JobRepository) GetRunnerByToken(ctx context.Context, tokenHash string) (*Runner, error) {
	runner := &Runner{}
	var metaJSON []byte
	// Scan targets are positional: keep them in the same order as the
	// SELECT column list.
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
		       last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners WHERE token_hash = $1
	`, tokenHash).Scan(
		&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
		&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
		&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
	)
	// NOTE(review): direct comparison assumes pgx returns ErrNoRows unwrapped;
	// errors.Is(err, pgx.ErrNoRows) would be more robust — confirm pgx version.
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying runner by token: %w", err)
	}
	if metaJSON != nil {
		if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
		}
	}
	return runner, nil
}
|
||||
|
||||
// GetRunner returns a runner by ID.
//
// Returns (nil, nil) when the ID does not exist — callers must check for a
// nil runner rather than relying on an error. Metadata is decoded only when
// the JSONB column is non-NULL.
func (r *JobRepository) GetRunner(ctx context.Context, runnerID string) (*Runner, error) {
	runner := &Runner{}
	var metaJSON []byte
	// Scan targets are positional: keep them in the same order as the
	// SELECT column list.
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
		       last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners WHERE id = $1
	`, runnerID).Scan(
		&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
		&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
		&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
	)
	// NOTE(review): as in GetRunnerByToken, errors.Is(err, pgx.ErrNoRows)
	// would tolerate wrapped errors — confirm pgx version.
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying runner: %w", err)
	}
	if metaJSON != nil {
		if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
		}
	}
	return runner, nil
}
|
||||
|
||||
// Heartbeat updates a runner's heartbeat timestamp and sets status to online.
|
||||
func (r *JobRepository) Heartbeat(ctx context.Context, runnerID string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE runners SET
|
||||
status = 'online',
|
||||
last_heartbeat = now(),
|
||||
updated_at = now()
|
||||
WHERE id = $1
|
||||
`, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating heartbeat: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("runner %s not found", runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListRunners returns all registered runners, ordered by name.
//
// Scan targets are positional and must match the SELECT column order.
// A NULL metadata column leaves runner.Metadata at its zero value.
func (r *JobRepository) ListRunners(ctx context.Context) ([]*Runner, error) {
	rows, err := r.db.pool.Query(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
		       last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners ORDER BY name
	`)
	if err != nil {
		return nil, fmt.Errorf("querying runners: %w", err)
	}
	defer rows.Close()

	var runners []*Runner
	for rows.Next() {
		runner := &Runner{}
		var metaJSON []byte
		if err := rows.Scan(
			&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
			&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
			&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scanning runner: %w", err)
		}
		if metaJSON != nil {
			if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
				return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
			}
		}
		runners = append(runners, runner)
	}
	// rows.Err surfaces any iteration error that ended the loop early.
	return runners, rows.Err()
}
|
||||
|
||||
// DeleteRunner removes a runner by ID.
|
||||
func (r *JobRepository) DeleteRunner(ctx context.Context, runnerID string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `DELETE FROM runners WHERE id = $1`, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting runner: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("runner %s not found", runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpireStaleRunners marks runners with no recent heartbeat as offline.
|
||||
func (r *JobRepository) ExpireStaleRunners(ctx context.Context, timeout time.Duration) (int64, error) {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE runners SET status = 'offline', updated_at = now()
|
||||
WHERE status = 'online'
|
||||
AND last_heartbeat < now() - $1::interval
|
||||
`, timeout.String())
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("expiring stale runners: %w", err)
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// scanJobDefinitions drains rows into JobDefinitionRecord values.
//
// Scan targets are positional and must match the callers' SELECT column
// order exactly. A NULL definition column leaves d.Definition at its zero
// value; only non-NULL JSONB is unmarshaled.
func scanJobDefinitions(rows pgx.Rows) ([]*JobDefinitionRecord, error) {
	var defs []*JobDefinitionRecord
	for rows.Next() {
		d := &JobDefinitionRecord{}
		var defJSON []byte
		if err := rows.Scan(
			&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
			&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
			&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scanning job definition: %w", err)
		}
		if defJSON != nil {
			if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
				return nil, fmt.Errorf("unmarshaling definition: %w", err)
			}
		}
		defs = append(defs, d)
	}
	// rows.Err surfaces any iteration error that ended the loop early.
	return defs, rows.Err()
}
|
||||
|
||||
// scanJobs drains rows into Job values.
//
// Scan targets are positional and must match the callers' SELECT column
// order exactly — there are 24 targets, so any reordering silently corrupts
// fields. The scope-metadata and result JSONB columns are decoded only when
// non-NULL.
func scanJobs(rows pgx.Rows) ([]*Job, error) {
	var jobs []*Job
	for rows.Next() {
		j := &Job{}
		var scopeJSON, resultJSON []byte
		if err := rows.Scan(
			&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status, &j.Priority,
			&j.ItemID, &j.ProjectID, &scopeJSON, &j.RunnerID, &j.RunnerTags,
			&j.CreatedAt, &j.ClaimedAt, &j.StartedAt, &j.CompletedAt,
			&j.TimeoutSeconds, &j.ExpiresAt, &j.Progress, &j.ProgressMessage,
			&resultJSON, &j.ErrorMessage, &j.RetryCount, &j.MaxRetries,
			&j.CreatedBy, &j.CancelledBy,
		); err != nil {
			return nil, fmt.Errorf("scanning job: %w", err)
		}
		if scopeJSON != nil {
			if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
				return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
			}
		}
		if resultJSON != nil {
			if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
				return nil, fmt.Errorf("unmarshaling result: %w", err)
			}
		}
		jobs = append(jobs, j)
	}
	// rows.Err surfaces any iteration error that ended the loop early.
	return jobs, rows.Err()
}
|
||||
@@ -1,166 +0,0 @@
|
||||
// Package jobdef handles YAML job definition parsing and validation.
|
||||
package jobdef
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Definition represents a compute job definition loaded from YAML.
//
// Zero/negative numeric fields are backfilled by Load before Validate runs
// (Timeout 600, MaxRetries 1, Priority 100, Version 1).
type Definition struct {
	Name        string        `yaml:"name" json:"name"`               // unique name; LoadAll keys its result map by this
	Version     int           `yaml:"version" json:"version"`         // definition version; defaulted to 1 by Load
	Description string        `yaml:"description" json:"description"` // free-form human description
	Trigger     TriggerConfig `yaml:"trigger" json:"trigger"`         // when a job is created
	Scope       ScopeConfig   `yaml:"scope" json:"scope"`             // what the job operates on
	Compute     ComputeConfig `yaml:"compute" json:"compute"`         // what computation runs
	Runner      RunnerConfig  `yaml:"runner" json:"runner"`           // runner requirements
	Timeout     int           `yaml:"timeout" json:"timeout"`         // presumably seconds (Load defaults to 600) — TODO confirm
	MaxRetries  int           `yaml:"max_retries" json:"max_retries"` // retry budget; defaulted to 1 by Load
	Priority    int           `yaml:"priority" json:"priority"`       // scheduling priority; defaulted to 100 by Load
}

// TriggerConfig describes when a job is created.
type TriggerConfig struct {
	Type   string            `yaml:"type" json:"type"`                         // must be one of validTriggerTypes
	Filter map[string]string `yaml:"filter,omitempty" json:"filter,omitempty"` // optional key/value trigger filter
}

// ScopeConfig describes what a job operates on.
type ScopeConfig struct {
	Type string `yaml:"type" json:"type"` // must be one of validScopeTypes
}

// ComputeConfig describes the computation to perform.
type ComputeConfig struct {
	Type    string         `yaml:"type" json:"type"`                     // must be one of validComputeTypes
	Command string         `yaml:"command" json:"command"`               // required; rejected by Validate when empty
	Args    map[string]any `yaml:"args,omitempty" json:"args,omitempty"` // optional free-form arguments
}

// RunnerConfig describes runner requirements.
type RunnerConfig struct {
	Tags []string `yaml:"tags" json:"tags"` // tags a runner must carry to claim the job
}

// DefinitionFile wraps a definition for YAML parsing; definition files nest
// everything under a top-level `job:` key.
type DefinitionFile struct {
	Job Definition `yaml:"job"`
}

// validTriggerTypes is the allow-list enforced by Validate for Trigger.Type.
var validTriggerTypes = map[string]bool{
	"revision_created": true,
	"bom_changed":      true,
	"manual":           true,
	"schedule":         true,
}

// validScopeTypes is the allow-list enforced by Validate for Scope.Type.
var validScopeTypes = map[string]bool{
	"item":     true,
	"assembly": true,
	"project":  true,
}

// validComputeTypes is the allow-list enforced by Validate for Compute.Type.
var validComputeTypes = map[string]bool{
	"validate": true,
	"rebuild":  true,
	"diff":     true,
	"export":   true,
	"custom":   true,
}
|
||||
|
||||
// Load reads a job definition from a YAML file.
|
||||
func Load(path string) (*Definition, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading job definition file: %w", err)
|
||||
}
|
||||
|
||||
var df DefinitionFile
|
||||
if err := yaml.Unmarshal(data, &df); err != nil {
|
||||
return nil, fmt.Errorf("parsing job definition YAML: %w", err)
|
||||
}
|
||||
|
||||
def := &df.Job
|
||||
|
||||
// Apply defaults
|
||||
if def.Timeout <= 0 {
|
||||
def.Timeout = 600
|
||||
}
|
||||
if def.MaxRetries <= 0 {
|
||||
def.MaxRetries = 1
|
||||
}
|
||||
if def.Priority <= 0 {
|
||||
def.Priority = 100
|
||||
}
|
||||
if def.Version <= 0 {
|
||||
def.Version = 1
|
||||
}
|
||||
|
||||
if err := def.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("validating %s: %w", path, err)
|
||||
}
|
||||
|
||||
return def, nil
|
||||
}
|
||||
|
||||
// LoadAll reads all job definitions from a directory.
|
||||
func LoadAll(dir string) (map[string]*Definition, error) {
|
||||
defs := make(map[string]*Definition)
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading job definitions directory: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if !strings.HasSuffix(entry.Name(), ".yaml") && !strings.HasSuffix(entry.Name(), ".yml") {
|
||||
continue
|
||||
}
|
||||
|
||||
path := filepath.Join(dir, entry.Name())
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading %s: %w", entry.Name(), err)
|
||||
}
|
||||
defs[def.Name] = def
|
||||
}
|
||||
|
||||
return defs, nil
|
||||
}
|
||||
|
||||
// Validate checks that the definition is well-formed.
|
||||
func (d *Definition) Validate() error {
|
||||
if d.Name == "" {
|
||||
return fmt.Errorf("job definition name is required")
|
||||
}
|
||||
if d.Trigger.Type == "" {
|
||||
return fmt.Errorf("trigger type is required")
|
||||
}
|
||||
if !validTriggerTypes[d.Trigger.Type] {
|
||||
return fmt.Errorf("invalid trigger type %q", d.Trigger.Type)
|
||||
}
|
||||
if d.Scope.Type == "" {
|
||||
return fmt.Errorf("scope type is required")
|
||||
}
|
||||
if !validScopeTypes[d.Scope.Type] {
|
||||
return fmt.Errorf("invalid scope type %q", d.Scope.Type)
|
||||
}
|
||||
if d.Compute.Type == "" {
|
||||
return fmt.Errorf("compute type is required")
|
||||
}
|
||||
if !validComputeTypes[d.Compute.Type] {
|
||||
return fmt.Errorf("invalid compute type %q", d.Compute.Type)
|
||||
}
|
||||
if d.Compute.Command == "" {
|
||||
return fmt.Errorf("compute command is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,328 +0,0 @@
|
||||
package jobdef
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLoadValid(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: test-job
|
||||
version: 1
|
||||
description: "A test job"
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
runner:
|
||||
tags: [create]
|
||||
timeout: 300
|
||||
max_retries: 2
|
||||
priority: 50
|
||||
`
|
||||
path := filepath.Join(dir, "test-job.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Load: %v", err)
|
||||
}
|
||||
|
||||
if def.Name != "test-job" {
|
||||
t.Errorf("name = %q, want %q", def.Name, "test-job")
|
||||
}
|
||||
if def.Version != 1 {
|
||||
t.Errorf("version = %d, want 1", def.Version)
|
||||
}
|
||||
if def.Trigger.Type != "manual" {
|
||||
t.Errorf("trigger type = %q, want %q", def.Trigger.Type, "manual")
|
||||
}
|
||||
if def.Scope.Type != "item" {
|
||||
t.Errorf("scope type = %q, want %q", def.Scope.Type, "item")
|
||||
}
|
||||
if def.Compute.Type != "validate" {
|
||||
t.Errorf("compute type = %q, want %q", def.Compute.Type, "validate")
|
||||
}
|
||||
if def.Compute.Command != "create-validate" {
|
||||
t.Errorf("compute command = %q, want %q", def.Compute.Command, "create-validate")
|
||||
}
|
||||
if len(def.Runner.Tags) != 1 || def.Runner.Tags[0] != "create" {
|
||||
t.Errorf("runner tags = %v, want [create]", def.Runner.Tags)
|
||||
}
|
||||
if def.Timeout != 300 {
|
||||
t.Errorf("timeout = %d, want 300", def.Timeout)
|
||||
}
|
||||
if def.MaxRetries != 2 {
|
||||
t.Errorf("max_retries = %d, want 2", def.MaxRetries)
|
||||
}
|
||||
if def.Priority != 50 {
|
||||
t.Errorf("priority = %d, want 50", def.Priority)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadDefaults(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: minimal
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: custom
|
||||
command: do-something
|
||||
`
|
||||
path := filepath.Join(dir, "minimal.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Load: %v", err)
|
||||
}
|
||||
|
||||
if def.Timeout != 600 {
|
||||
t.Errorf("default timeout = %d, want 600", def.Timeout)
|
||||
}
|
||||
if def.MaxRetries != 1 {
|
||||
t.Errorf("default max_retries = %d, want 1", def.MaxRetries)
|
||||
}
|
||||
if def.Priority != 100 {
|
||||
t.Errorf("default priority = %d, want 100", def.Priority)
|
||||
}
|
||||
if def.Version != 1 {
|
||||
t.Errorf("default version = %d, want 1", def.Version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadInvalidTriggerType(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: bad-trigger
|
||||
trigger:
|
||||
type: invalid_trigger
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
`
|
||||
path := filepath.Join(dir, "bad.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
_, err := Load(path)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid trigger type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadMissingName(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
`
|
||||
path := filepath.Join(dir, "no-name.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
_, err := Load(path)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadMissingCommand(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: no-command
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: validate
|
||||
`
|
||||
path := filepath.Join(dir, "no-cmd.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
_, err := Load(path)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing command")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAllDirectory(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
job1 := `
|
||||
job:
|
||||
name: job-one
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
`
|
||||
job2 := `
|
||||
job:
|
||||
name: job-two
|
||||
trigger:
|
||||
type: revision_created
|
||||
scope:
|
||||
type: assembly
|
||||
compute:
|
||||
type: export
|
||||
command: create-export
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(dir, "one.yaml"), []byte(job1), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(dir, "two.yml"), []byte(job2), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Non-YAML file should be ignored
|
||||
if err := os.WriteFile(filepath.Join(dir, "readme.txt"), []byte("ignore me"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defs, err := LoadAll(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("LoadAll: %v", err)
|
||||
}
|
||||
|
||||
if len(defs) != 2 {
|
||||
t.Fatalf("loaded %d definitions, want 2", len(defs))
|
||||
}
|
||||
if _, ok := defs["job-one"]; !ok {
|
||||
t.Error("job-one not found")
|
||||
}
|
||||
if _, ok := defs["job-two"]; !ok {
|
||||
t.Error("job-two not found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAllEmptyDirectory(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
defs, err := LoadAll(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("LoadAll: %v", err)
|
||||
}
|
||||
if len(defs) != 0 {
|
||||
t.Errorf("loaded %d definitions from empty dir, want 0", len(defs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadWithFilter(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: filtered-job
|
||||
trigger:
|
||||
type: revision_created
|
||||
filter:
|
||||
item_type: assembly
|
||||
scope:
|
||||
type: assembly
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
`
|
||||
path := filepath.Join(dir, "filtered.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Load: %v", err)
|
||||
}
|
||||
|
||||
if def.Trigger.Filter["item_type"] != "assembly" {
|
||||
t.Errorf("filter item_type = %q, want %q", def.Trigger.Filter["item_type"], "assembly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadWithArgs(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
content := `
|
||||
job:
|
||||
name: args-job
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: export
|
||||
command: create-export
|
||||
args:
|
||||
format: step
|
||||
include_mesh: true
|
||||
`
|
||||
path := filepath.Join(dir, "args.yaml")
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("writing test file: %v", err)
|
||||
}
|
||||
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Load: %v", err)
|
||||
}
|
||||
|
||||
if def.Compute.Args["format"] != "step" {
|
||||
t.Errorf("args format = %v, want %q", def.Compute.Args["format"], "step")
|
||||
}
|
||||
if def.Compute.Args["include_mesh"] != true {
|
||||
t.Errorf("args include_mesh = %v, want true", def.Compute.Args["include_mesh"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInvalidScopeType(t *testing.T) {
|
||||
d := &Definition{
|
||||
Name: "test",
|
||||
Trigger: TriggerConfig{Type: "manual"},
|
||||
Scope: ScopeConfig{Type: "galaxy"},
|
||||
Compute: ComputeConfig{Type: "validate", Command: "create-validate"},
|
||||
}
|
||||
if err := d.Validate(); err == nil {
|
||||
t.Fatal("expected error for invalid scope type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInvalidComputeType(t *testing.T) {
|
||||
d := &Definition{
|
||||
Name: "test",
|
||||
Trigger: TriggerConfig{Type: "manual"},
|
||||
Scope: ScopeConfig{Type: "item"},
|
||||
Compute: ComputeConfig{Type: "teleport", Command: "beam-up"},
|
||||
}
|
||||
if err := d.Validate(); err == nil {
|
||||
t.Fatal("expected error for invalid compute type")
|
||||
}
|
||||
}
|
||||
@@ -80,8 +80,6 @@ func TruncateAll(t *testing.T, pool *pgxpool.Pool) {
|
||||
|
||||
_, err := pool.Exec(context.Background(), `
|
||||
TRUNCATE
|
||||
job_log, jobs, job_definitions, runners,
|
||||
dag_cross_edges, dag_edges, dag_nodes,
|
||||
audit_log, sync_log, api_tokens, sessions, item_files,
|
||||
item_projects, relationships, revisions, inventory, items,
|
||||
projects, sequences_by_name, users, property_migrations
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
# Job definition: validate an assembly whenever a new revision of an
# assembly-type item is created. Loaded by the jobdef package at startup.
job:
  name: assembly-validate
  version: 1
  description: "Validate assembly by rebuilding its dependency subgraph"

  # Fire on revision creation, restricted to assembly-type items.
  trigger:
    type: revision_created
    filter:
      item_type: assembly

  scope:
    type: assembly

  compute:
    type: validate
    command: create-validate
    args:
      rebuild_mode: incremental
      check_interference: true

  # Only runners carrying the `create` tag may claim this job.
  runner:
    tags: [create]

  timeout: 900  # presumably seconds — TODO confirm against job_definitions.timeout_seconds
  max_retries: 2
  priority: 50  # differs from the default 100 — verify scheduler ordering semantics
|
||||
@@ -1,24 +0,0 @@
|
||||
# Job definition: export a single part to STEP, triggered manually.
# Loaded by the jobdef package at startup.
job:
  name: part-export-step
  version: 1
  description: "Export a part to STEP format"

  trigger:
    type: manual

  scope:
    type: item

  compute:
    type: export
    command: create-export
    args:
      format: step
      # Template placeholders are presumably filled by the runner — confirm.
      output_key_template: "exports/{part_number}_rev{revision}.step"

  # Only runners carrying the `create` tag may claim this job.
  runner:
    tags: [create]

  timeout: 300  # presumably seconds — TODO confirm against job_definitions.timeout_seconds
  max_retries: 1
  priority: 100
|
||||
@@ -1,67 +0,0 @@
|
||||
-- Dependency DAG: feature-level nodes and edges within items.
-- Migration: 014_dag_nodes_edges
-- Date: 2026-02

BEGIN;

--------------------------------------------------------------------------------
-- DAG Nodes (feature-level nodes within an item's revision)
-- One row per feature node, keyed by (item, revision, node_key).
--------------------------------------------------------------------------------

CREATE TABLE dag_nodes (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    item_id UUID NOT NULL REFERENCES items(id) ON DELETE CASCADE,
    revision_number INTEGER NOT NULL,
    node_key TEXT NOT NULL,
    node_type TEXT NOT NULL,
    properties_hash TEXT,
    validation_state TEXT NOT NULL DEFAULT 'clean',
    validation_msg TEXT,
    metadata JSONB DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE(item_id, revision_number, node_key)
);

CREATE INDEX idx_dag_nodes_item ON dag_nodes(item_id);
CREATE INDEX idx_dag_nodes_item_rev ON dag_nodes(item_id, revision_number);
-- Partial index: presumably most nodes stay 'clean', so only the
-- exceptional states are indexed for dirty-node queries.
CREATE INDEX idx_dag_nodes_state ON dag_nodes(validation_state)
    WHERE validation_state != 'clean';
CREATE INDEX idx_dag_nodes_type ON dag_nodes(node_type);

--------------------------------------------------------------------------------
-- DAG Edges (dependencies between nodes within a single item)
-- Direction: source → target means "target depends on source"
--------------------------------------------------------------------------------

CREATE TABLE dag_edges (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    target_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    edge_type TEXT NOT NULL DEFAULT 'depends_on',
    metadata JSONB DEFAULT '{}',
    UNIQUE(source_node_id, target_node_id, edge_type),
    -- A node may not depend on itself.
    CONSTRAINT no_self_edge CHECK (source_node_id != target_node_id)
);

CREATE INDEX idx_dag_edges_source ON dag_edges(source_node_id);
CREATE INDEX idx_dag_edges_target ON dag_edges(target_node_id);

--------------------------------------------------------------------------------
-- Cross-item DAG edges (linking feature nodes across BOM boundaries)
-- relationship_id is nullable and survives relationship deletion (SET NULL).
--------------------------------------------------------------------------------

CREATE TABLE dag_cross_edges (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    target_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    relationship_id UUID REFERENCES relationships(id) ON DELETE SET NULL,
    edge_type TEXT NOT NULL DEFAULT 'assembly_ref',
    metadata JSONB DEFAULT '{}',
    UNIQUE(source_node_id, target_node_id)
);

CREATE INDEX idx_dag_cross_source ON dag_cross_edges(source_node_id);
CREATE INDEX idx_dag_cross_target ON dag_cross_edges(target_node_id);

COMMIT;
|
||||
@@ -1,109 +0,0 @@
|
||||
-- Worker system: runners, job definitions, jobs, and job log.
|
||||
-- Migration: 015_jobs_runners
|
||||
-- Date: 2026-02
|
||||
|
||||
BEGIN;
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Runners (registered compute workers)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE runners (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
token_hash TEXT NOT NULL,
|
||||
token_prefix TEXT NOT NULL,
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
status TEXT NOT NULL DEFAULT 'offline',
|
||||
last_heartbeat TIMESTAMPTZ,
|
||||
last_job_id UUID,
|
||||
metadata JSONB DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_runners_status ON runners(status);
|
||||
CREATE INDEX idx_runners_token ON runners(token_hash);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Job Definitions (parsed from YAML, stored for reference and FK)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE job_definitions (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
version INTEGER NOT NULL DEFAULT 1,
|
||||
trigger_type TEXT NOT NULL,
|
||||
scope_type TEXT NOT NULL,
|
||||
compute_type TEXT NOT NULL,
|
||||
runner_tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
timeout_seconds INTEGER NOT NULL DEFAULT 600,
|
||||
max_retries INTEGER NOT NULL DEFAULT 1,
|
||||
priority INTEGER NOT NULL DEFAULT 100,
|
||||
definition JSONB NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT true,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_job_defs_trigger ON job_definitions(trigger_type);
|
||||
CREATE INDEX idx_job_defs_enabled ON job_definitions(enabled) WHERE enabled = true;
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Jobs (individual compute job instances)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TYPE job_status AS ENUM (
|
||||
'pending', 'claimed', 'running', 'completed', 'failed', 'cancelled'
|
||||
);
|
||||
|
||||
CREATE TABLE jobs (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
job_definition_id UUID REFERENCES job_definitions(id) ON DELETE SET NULL,
|
||||
definition_name TEXT NOT NULL,
|
||||
status job_status NOT NULL DEFAULT 'pending',
|
||||
priority INTEGER NOT NULL DEFAULT 100,
|
||||
item_id UUID REFERENCES items(id) ON DELETE CASCADE,
|
||||
project_id UUID REFERENCES projects(id) ON DELETE SET NULL,
|
||||
scope_metadata JSONB DEFAULT '{}',
|
||||
runner_id UUID REFERENCES runners(id) ON DELETE SET NULL,
|
||||
runner_tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
claimed_at TIMESTAMPTZ,
|
||||
started_at TIMESTAMPTZ,
|
||||
completed_at TIMESTAMPTZ,
|
||||
timeout_seconds INTEGER NOT NULL DEFAULT 600,
|
||||
expires_at TIMESTAMPTZ,
|
||||
progress INTEGER DEFAULT 0,
|
||||
progress_message TEXT,
|
||||
result JSONB,
|
||||
error_message TEXT,
|
||||
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||
max_retries INTEGER NOT NULL DEFAULT 1,
|
||||
created_by TEXT,
|
||||
cancelled_by TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX idx_jobs_status ON jobs(status);
|
||||
CREATE INDEX idx_jobs_pending ON jobs(status, priority, created_at)
|
||||
WHERE status = 'pending';
|
||||
CREATE INDEX idx_jobs_item ON jobs(item_id);
|
||||
CREATE INDEX idx_jobs_runner ON jobs(runner_id);
|
||||
CREATE INDEX idx_jobs_definition ON jobs(job_definition_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Job Log (append-only progress entries)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE job_log (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
job_id UUID NOT NULL REFERENCES jobs(id) ON DELETE CASCADE,
|
||||
timestamp TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
level TEXT NOT NULL DEFAULT 'info',
|
||||
message TEXT NOT NULL,
|
||||
metadata JSONB DEFAULT '{}'
|
||||
);
|
||||
|
||||
CREATE INDEX idx_job_log_job ON job_log(job_id, timestamp);
|
||||
|
||||
COMMIT;
|
||||
@@ -100,7 +100,7 @@ export function AppShell() {
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -115,7 +115,7 @@ export function AppShell() {
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
background: "var(--ctp-surface0)",
|
||||
@@ -129,9 +129,9 @@ export function AppShell() {
|
||||
<button
|
||||
onClick={logout}
|
||||
style={{
|
||||
padding: "0.25rem 0.75rem",
|
||||
padding: "0.35rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
cursor: "pointer",
|
||||
border: "none",
|
||||
background: "var(--ctp-surface1)",
|
||||
|
||||
@@ -80,7 +80,7 @@ export function ContextMenu({ x, y, items, onClose }: ContextMenuProps) {
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
width: "100%",
|
||||
padding: "0.25rem 0.75rem",
|
||||
padding: "0.35rem 0.75rem",
|
||||
background: "none",
|
||||
border: "none",
|
||||
color: item.disabled ? "var(--ctp-overlay0)" : "var(--ctp-text)",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { ReactNode } from "react";
|
||||
import type { ReactNode } from 'react';
|
||||
|
||||
interface PageFooterProps {
|
||||
stats?: ReactNode;
|
||||
@@ -8,40 +8,32 @@ interface PageFooterProps {
|
||||
onPageChange?: (page: number) => void;
|
||||
}
|
||||
|
||||
export function PageFooter({
|
||||
stats,
|
||||
page,
|
||||
pageSize,
|
||||
itemCount,
|
||||
onPageChange,
|
||||
}: PageFooterProps) {
|
||||
export function PageFooter({ stats, page, pageSize, itemCount, onPageChange }: PageFooterProps) {
|
||||
const hasPagination = page !== undefined && onPageChange !== undefined;
|
||||
|
||||
return (
|
||||
<div
|
||||
style={{
|
||||
position: "fixed",
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
height: "var(--d-footer-h)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderTop: "1px solid var(--ctp-surface1)",
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "space-between",
|
||||
padding: "0 var(--d-footer-px)",
|
||||
fontSize: "var(--d-footer-font)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
zIndex: 100,
|
||||
}}
|
||||
>
|
||||
<div style={{ display: "flex", gap: "1.5rem", alignItems: "center" }}>
|
||||
<div style={{
|
||||
position: 'fixed',
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
height: 'var(--d-footer-h)',
|
||||
backgroundColor: 'var(--ctp-surface0)',
|
||||
borderTop: '1px solid var(--ctp-surface1)',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'space-between',
|
||||
padding: '0 var(--d-footer-px)',
|
||||
fontSize: 'var(--d-footer-font)',
|
||||
color: 'var(--ctp-subtext0)',
|
||||
zIndex: 100,
|
||||
}}>
|
||||
<div style={{ display: 'flex', gap: '1.5rem', alignItems: 'center' }}>
|
||||
{stats}
|
||||
</div>
|
||||
|
||||
{hasPagination && (
|
||||
<div style={{ display: "flex", gap: "0.5rem", alignItems: "center" }}>
|
||||
<div style={{ display: 'flex', gap: '0.5rem', alignItems: 'center' }}>
|
||||
<button
|
||||
onClick={() => onPageChange(Math.max(1, page - 1))}
|
||||
disabled={page <= 1}
|
||||
@@ -55,11 +47,7 @@ export function PageFooter({
|
||||
</span>
|
||||
<button
|
||||
onClick={() => onPageChange(page + 1)}
|
||||
disabled={
|
||||
pageSize !== undefined &&
|
||||
itemCount !== undefined &&
|
||||
itemCount < pageSize
|
||||
}
|
||||
disabled={pageSize !== undefined && itemCount !== undefined && itemCount < pageSize}
|
||||
style={pageBtnStyle}
|
||||
>
|
||||
Next
|
||||
@@ -71,11 +59,11 @@ export function PageFooter({
|
||||
}
|
||||
|
||||
const pageBtnStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "inherit",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
padding: '0.15rem 0.4rem',
|
||||
fontSize: 'inherit',
|
||||
border: 'none',
|
||||
borderRadius: '0.25rem',
|
||||
backgroundColor: 'var(--ctp-surface1)',
|
||||
color: 'var(--ctp-text)',
|
||||
cursor: 'pointer',
|
||||
};
|
||||
|
||||
@@ -124,7 +124,7 @@ export function TagInput({
|
||||
padding: "0.25rem 0.5rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "text",
|
||||
minHeight: "1.8rem",
|
||||
}}
|
||||
@@ -137,7 +137,7 @@ export function TagInput({
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
backgroundColor: "rgba(203,166,247,0.15)",
|
||||
color: "var(--ctp-mauve)",
|
||||
@@ -187,7 +187,7 @@ export function TagInput({
|
||||
background: "transparent",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--font-body)",
|
||||
padding: "0.25rem 0",
|
||||
padding: "0.15rem 0",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
@@ -202,7 +202,7 @@ export function TagInput({
|
||||
marginTop: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
maxHeight: "160px",
|
||||
overflowY: "auto",
|
||||
}}
|
||||
|
||||
@@ -218,7 +218,7 @@ export function AuditDetailPanel({
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -477,10 +477,10 @@ function FieldRow({
|
||||
placeholder="---"
|
||||
style={{
|
||||
flex: 1,
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-text)",
|
||||
outline: "none",
|
||||
@@ -495,7 +495,7 @@ const closeBtnStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -70,7 +70,7 @@ export function AuditSummaryBar({
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "1.5rem",
|
||||
marginTop: "0.5rem",
|
||||
marginTop: "0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
}}
|
||||
|
||||
@@ -103,7 +103,7 @@ export function AuditTable({
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
|
||||
@@ -97,7 +97,7 @@ export function AuditToolbar({
|
||||
const selectStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -106,7 +106,7 @@ const selectStyle: React.CSSProperties = {
|
||||
const btnStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
|
||||
@@ -118,11 +118,11 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
width: "100%",
|
||||
};
|
||||
@@ -240,7 +240,7 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Download size={14} /> Export CSV
|
||||
@@ -256,7 +256,7 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> Add
|
||||
@@ -267,9 +267,9 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
{isEditor && assemblyCount > 0 && (
|
||||
<div
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.35rem 0.5rem",
|
||||
marginBottom: "0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "rgba(148,226,213,0.1)",
|
||||
border: "1px solid rgba(148,226,213,0.3)",
|
||||
fontSize: "0.75rem",
|
||||
@@ -438,7 +438,7 @@ const toolBtnStyle: React.CSSProperties = {
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
@@ -451,16 +451,16 @@ const actionBtnStyle: React.CSSProperties = {
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.25rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.15rem 0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
const saveBtnStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-green)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -468,7 +468,7 @@ const saveBtnStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const sourceBadgeBase: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.4rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 500,
|
||||
@@ -487,11 +487,11 @@ const manualBadge: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const cancelBtnStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -62,7 +62,7 @@ export function CategoryPicker({
|
||||
<div
|
||||
style={{
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
overflow: "hidden",
|
||||
}}
|
||||
@@ -74,7 +74,7 @@ export function CategoryPicker({
|
||||
display: "flex",
|
||||
flexWrap: "wrap",
|
||||
gap: "0.25rem",
|
||||
padding: "0.5rem 0.5rem",
|
||||
padding: "0.4rem 0.5rem",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-mantle)",
|
||||
}}
|
||||
@@ -99,7 +99,7 @@ export function CategoryPicker({
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
backgroundColor: isActive
|
||||
? "rgba(203,166,247,0.2)"
|
||||
@@ -133,7 +133,7 @@ export function CategoryPicker({
|
||||
disabled={isMultiStage && !selectedDomain}
|
||||
style={{
|
||||
width: "100%",
|
||||
padding: "0.5rem 0.5rem",
|
||||
padding: "0.4rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
|
||||
@@ -382,7 +382,7 @@ export function CreateItemPane({ onCreated, onCancel }: CreateItemPaneProps) {
|
||||
onClick={handleThumbnailSelect}
|
||||
style={{
|
||||
aspectRatio: "4/3",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
border: "1px dashed var(--ctp-surface1)",
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
@@ -619,7 +619,7 @@ function SidebarSection({
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
color: "var(--ctp-subtext0)",
|
||||
marginBottom: "0.5rem",
|
||||
marginBottom: "0.4rem",
|
||||
}}
|
||||
>
|
||||
{title}
|
||||
@@ -636,7 +636,7 @@ function MetaRow({ label, value }: { label: string; value: string }) {
|
||||
display: "flex",
|
||||
justifyContent: "space-between",
|
||||
fontSize: "var(--font-table)",
|
||||
padding: "0.25rem 0",
|
||||
padding: "0.15rem 0",
|
||||
}}
|
||||
>
|
||||
<span style={{ color: "var(--ctp-subtext0)" }}>{label}</span>
|
||||
@@ -686,7 +686,7 @@ const actionBtnStyle: React.CSSProperties = {
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
};
|
||||
@@ -698,17 +698,17 @@ const cancelBtnStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
width: "100%",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.35rem 0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
boxSizing: "border-box",
|
||||
};
|
||||
@@ -723,7 +723,7 @@ const errorStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
};
|
||||
|
||||
@@ -73,7 +73,7 @@ export function DeleteItemPane({
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
fontSize: "var(--font-body)",
|
||||
width: "100%",
|
||||
textAlign: "center",
|
||||
@@ -125,7 +125,7 @@ export function DeleteItemPane({
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
@@ -141,7 +141,7 @@ export function DeleteItemPane({
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-red)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -163,6 +163,6 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
@@ -93,7 +93,7 @@ export function EditItemPane({
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-blue)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -114,7 +114,7 @@ export function EditItemPane({
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
@@ -208,11 +208,11 @@ function FormGroup({
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
width: "100%",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.35rem 0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
@@ -223,6 +223,6 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
@@ -143,8 +143,8 @@ function FileRow({
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
position: "relative",
|
||||
}}
|
||||
>
|
||||
@@ -153,14 +153,14 @@ function FileRow({
|
||||
style={{
|
||||
width: 28,
|
||||
height: 28,
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: color,
|
||||
opacity: 0.8,
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "center",
|
||||
fontSize: "var(--font-xs)",
|
||||
fontWeight: 600,
|
||||
fontWeight: 700,
|
||||
color: "var(--ctp-crust)",
|
||||
flexShrink: 0,
|
||||
}}
|
||||
@@ -239,7 +239,7 @@ function FileRow({
|
||||
cursor: "pointer",
|
||||
fontSize: "var(--font-table)",
|
||||
color: hovered ? "var(--ctp-red)" : "var(--ctp-overlay0)",
|
||||
padding: "0 0.25rem",
|
||||
padding: "0 0.2rem",
|
||||
flexShrink: 0,
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
|
||||
@@ -90,7 +90,7 @@ export function ImportItemsPane({
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
@@ -164,7 +164,7 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
gap: "0.4rem",
|
||||
fontSize: "var(--font-body)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
marginBottom: "0.75rem",
|
||||
@@ -187,11 +187,11 @@ export function ImportItemsPane({
|
||||
onClick={() => void doImport(true)}
|
||||
disabled={!file || importing}
|
||||
style={{
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-yellow)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -205,11 +205,11 @@ export function ImportItemsPane({
|
||||
onClick={() => void doImport(false)}
|
||||
disabled={importing || (result?.error_count ?? 0) > 0}
|
||||
style={{
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-green)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -227,7 +227,7 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
@@ -262,7 +262,7 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
fontSize: "0.75rem",
|
||||
padding: "0.25rem 0",
|
||||
padding: "0.15rem 0",
|
||||
}}
|
||||
>
|
||||
Row {err.row}
|
||||
@@ -296,6 +296,6 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
@@ -103,7 +103,7 @@ export function ItemDetail({
|
||||
</span>
|
||||
<span
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 500,
|
||||
@@ -157,7 +157,7 @@ export function ItemDetail({
|
||||
key={tab.key}
|
||||
onClick={() => setActiveTab(tab.key)}
|
||||
style={{
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderBottom:
|
||||
@@ -205,5 +205,5 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "var(--font-table)",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
};
|
||||
|
||||
@@ -268,7 +268,7 @@ export function ItemTable({
|
||||
<td key={col.key} style={tdStyle}>
|
||||
<span
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
@@ -398,6 +398,6 @@ const actionBtnStyle: React.CSSProperties = {
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
padding: "0.15rem 0.4rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
@@ -41,7 +41,7 @@ export function ItemsToolbar({
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
backgroundColor:
|
||||
filters.searchScope === scope
|
||||
@@ -81,7 +81,7 @@ export function ItemsToolbar({
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
}}
|
||||
@@ -144,7 +144,7 @@ export function ItemsToolbar({
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
title="Export CSV"
|
||||
>
|
||||
@@ -159,7 +159,7 @@ export function ItemsToolbar({
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
title="Import CSV"
|
||||
>
|
||||
@@ -177,7 +177,7 @@ export function ItemsToolbar({
|
||||
color: "var(--ctp-crust)",
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> New
|
||||
@@ -191,7 +191,7 @@ const selectStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
};
|
||||
@@ -200,7 +200,7 @@ const toolBtnStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
|
||||
@@ -134,7 +134,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
marginTop: "0.75rem",
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
@@ -177,7 +177,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
backgroundColor: "rgba(203,166,247,0.15)",
|
||||
color: "var(--ctp-mauve)",
|
||||
@@ -208,11 +208,11 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
value={addProject}
|
||||
onChange={(e) => setAddProject(e.target.value)}
|
||||
style={{
|
||||
padding: "0.25rem 0.25rem",
|
||||
padding: "0.15rem 0.25rem",
|
||||
fontSize: "0.75rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
}}
|
||||
>
|
||||
@@ -229,12 +229,12 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
<button
|
||||
onClick={() => void handleAddProject()}
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.4rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
@@ -253,7 +253,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
marginTop: "0.75rem",
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
}}
|
||||
>
|
||||
<div
|
||||
@@ -298,7 +298,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
|
||||
@@ -125,11 +125,11 @@ export function PropertiesTab({
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
@@ -165,7 +165,7 @@ export function PropertiesTab({
|
||||
padding: "0.25rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -250,7 +250,7 @@ export function PropertiesTab({
|
||||
marginTop: "0.25rem",
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> Add Property
|
||||
@@ -274,7 +274,7 @@ export function PropertiesTab({
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
color: "var(--ctp-text)",
|
||||
resize: "vertical",
|
||||
}}
|
||||
@@ -300,7 +300,7 @@ const tabBtn: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -97,11 +97,11 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
);
|
||||
|
||||
const selectStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.25rem 0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
@@ -147,7 +147,7 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -164,7 +164,7 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
style={{
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
marginBottom: "0.75rem",
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
@@ -250,10 +250,10 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
)
|
||||
}
|
||||
style={{
|
||||
padding: "0.25rem 0.25rem",
|
||||
padding: "0.15rem 0.25rem",
|
||||
fontSize: "0.75rem",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
backgroundColor: "transparent",
|
||||
color: statusColors[rev.status] ?? "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -116,7 +116,7 @@ const titleStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-mauve)",
|
||||
textAlign: "center",
|
||||
fontSize: "2rem",
|
||||
fontWeight: 600,
|
||||
fontWeight: 700,
|
||||
marginBottom: "0.25rem",
|
||||
};
|
||||
|
||||
@@ -164,7 +164,7 @@ const btnPrimaryStyle: React.CSSProperties = {
|
||||
display: "block",
|
||||
width: "100%",
|
||||
padding: "0.75rem 1.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
fontWeight: 500,
|
||||
fontSize: "0.75rem",
|
||||
cursor: "pointer",
|
||||
@@ -189,7 +189,7 @@ const btnOidcStyle: React.CSSProperties = {
|
||||
display: "block",
|
||||
width: "100%",
|
||||
padding: "0.75rem 1.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
fontWeight: 500,
|
||||
fontSize: "0.75rem",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -216,7 +216,7 @@ export function ProjectsPage() {
|
||||
...btnPrimaryStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> New Project
|
||||
@@ -465,7 +465,7 @@ export function ProjectsPage() {
|
||||
// Styles
|
||||
const btnPrimaryStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
@@ -476,7 +476,7 @@ const btnPrimaryStyle: React.CSSProperties = {
|
||||
|
||||
const btnSecondaryStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -487,7 +487,7 @@ const btnSecondaryStyle: React.CSSProperties = {
|
||||
|
||||
const btnDangerStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-red)",
|
||||
color: "var(--ctp-crust)",
|
||||
@@ -498,7 +498,7 @@ const btnDangerStyle: React.CSSProperties = {
|
||||
|
||||
const btnSmallStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -530,7 +530,7 @@ const formCloseStyle: React.CSSProperties = {
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
};
|
||||
|
||||
const errorBannerStyle: React.CSSProperties = {
|
||||
@@ -538,7 +538,7 @@ const errorBannerStyle: React.CSSProperties = {
|
||||
background: "rgba(243, 139, 168, 0.1)",
|
||||
border: "1px solid rgba(243, 139, 168, 0.2)",
|
||||
padding: "0.5rem 0.75rem",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
marginBottom: "0.75rem",
|
||||
fontSize: "var(--font-body)",
|
||||
};
|
||||
@@ -549,7 +549,7 @@ const fieldStyle: React.CSSProperties = {
|
||||
|
||||
const labelStyle: React.CSSProperties = {
|
||||
display: "block",
|
||||
marginBottom: "0.25rem",
|
||||
marginBottom: "0.35rem",
|
||||
fontWeight: 500,
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "var(--font-body)",
|
||||
@@ -560,7 +560,7 @@ const inputStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--font-body)",
|
||||
boxSizing: "border-box",
|
||||
@@ -587,7 +587,7 @@ const thStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const tdStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.75rem",
|
||||
padding: "0.35rem 0.75rem",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
fontSize: "var(--font-body)",
|
||||
};
|
||||
|
||||
@@ -657,7 +657,7 @@ function SegmentBlock({
|
||||
marginTop: "0.5rem",
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
gap: "0.35rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> Add Value
|
||||
@@ -692,7 +692,7 @@ const segmentStyle: React.CSSProperties = {
|
||||
|
||||
const typeBadgeStyle: React.CSSProperties = {
|
||||
display: "inline-block",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -707,7 +707,7 @@ const emptyStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const thStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
textAlign: "left",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-overlay1)",
|
||||
@@ -725,7 +725,7 @@ const tdStyle: React.CSSProperties = {
|
||||
|
||||
const btnTinyStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -736,7 +736,7 @@ const btnTinyStyle: React.CSSProperties = {
|
||||
|
||||
const btnTinyPrimaryStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
|
||||
@@ -114,7 +114,7 @@ export function SettingsPage() {
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.25rem 0.5rem",
|
||||
padding: "0.15rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "var(--font-table)",
|
||||
fontWeight: 600,
|
||||
@@ -386,7 +386,7 @@ const createFormStyle: React.CSSProperties = {
|
||||
|
||||
const labelStyle: React.CSSProperties = {
|
||||
display: "block",
|
||||
marginBottom: "0.25rem",
|
||||
marginBottom: "0.35rem",
|
||||
fontWeight: 500,
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "var(--font-body)",
|
||||
@@ -397,7 +397,7 @@ const inputStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
borderRadius: "0.4rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--font-body)",
|
||||
boxSizing: "border-box",
|
||||
@@ -405,7 +405,7 @@ const inputStyle: React.CSSProperties = {
|
||||
|
||||
const btnPrimaryStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
@@ -416,10 +416,10 @@ const btnPrimaryStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const btnCopyStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
background: "var(--ctp-surface1)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
@@ -427,10 +427,10 @@ const btnCopyStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const btnDismissStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
background: "none",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
color: "var(--ctp-subtext0)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
@@ -442,7 +442,7 @@ const btnDangerStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-red)",
|
||||
border: "none",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
@@ -453,7 +453,7 @@ const btnRevokeConfirmStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-crust)",
|
||||
border: "none",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
@@ -461,7 +461,7 @@ const btnRevokeConfirmStyle: React.CSSProperties = {
|
||||
|
||||
const btnTinyStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
borderRadius: "0.375rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -488,7 +488,7 @@ const thStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const tdStyle: React.CSSProperties = {
|
||||
padding: "0.5rem 0.75rem",
|
||||
padding: "0.4rem 0.75rem",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
fontSize: "var(--font-body)",
|
||||
};
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
/* Focus and hover states for form inputs */
|
||||
.silo-input {
|
||||
transition:
|
||||
border-color 0.15s ease,
|
||||
box-shadow 0.15s ease;
|
||||
transition: border-color 0.15s ease, box-shadow 0.15s ease;
|
||||
}
|
||||
|
||||
.silo-input:hover {
|
||||
@@ -11,6 +9,6 @@
|
||||
|
||||
.silo-input:focus {
|
||||
border-color: var(--ctp-mauve);
|
||||
box-shadow: 0 0 0 0.25rem rgba(203, 166, 247, 0.25);
|
||||
box-shadow: 0 0 0 0.2rem rgba(203, 166, 247, 0.25);
|
||||
outline: none;
|
||||
}
|
||||
|
||||
@@ -44,13 +44,13 @@
|
||||
--d-header-px: 2rem;
|
||||
--d-header-logo: 1.25rem;
|
||||
--d-nav-gap: 1rem;
|
||||
--d-nav-py: 0.25rem;
|
||||
--d-nav-py: 0.35rem;
|
||||
--d-nav-px: 0.75rem;
|
||||
--d-nav-radius: 0.5rem;
|
||||
--d-user-gap: 0.5rem;
|
||||
--d-nav-radius: 0.4rem;
|
||||
--d-user-gap: 0.6rem;
|
||||
--d-user-font: var(--font-body);
|
||||
|
||||
--d-th-py: 0.25rem;
|
||||
--d-th-py: 0.35rem;
|
||||
--d-th-px: 0.75rem;
|
||||
--d-th-font: var(--font-table);
|
||||
--d-td-py: 0.25rem;
|
||||
@@ -59,9 +59,9 @@
|
||||
|
||||
--d-toolbar-gap: 0.5rem;
|
||||
--d-toolbar-py: 0.5rem;
|
||||
--d-toolbar-mb: 0.25rem;
|
||||
--d-input-py: 0.25rem;
|
||||
--d-input-px: 0.5rem;
|
||||
--d-toolbar-mb: 0.35rem;
|
||||
--d-input-py: 0.35rem;
|
||||
--d-input-px: 0.6rem;
|
||||
--d-input-font: var(--font-body);
|
||||
|
||||
--d-footer-h: 28px;
|
||||
@@ -71,28 +71,28 @@
|
||||
|
||||
/* ── Density: compact ── */
|
||||
[data-density="compact"] {
|
||||
--d-header-py: 0.25rem;
|
||||
--d-header-py: 0.35rem;
|
||||
--d-header-px: 1.25rem;
|
||||
--d-header-logo: 1.1rem;
|
||||
--d-nav-gap: 0.5rem;
|
||||
--d-nav-py: 0.25rem;
|
||||
--d-nav-py: 0.2rem;
|
||||
--d-nav-px: 0.5rem;
|
||||
--d-nav-radius: 0.25rem;
|
||||
--d-user-gap: 0.25rem;
|
||||
--d-nav-radius: 0.3rem;
|
||||
--d-user-gap: 0.35rem;
|
||||
--d-user-font: var(--font-table);
|
||||
|
||||
--d-th-py: 0.25rem;
|
||||
--d-th-py: 0.2rem;
|
||||
--d-th-px: 0.5rem;
|
||||
--d-th-font: var(--font-sm);
|
||||
--d-td-py: 0.25rem;
|
||||
--d-td-py: 0.125rem;
|
||||
--d-td-px: 0.5rem;
|
||||
--d-td-font: var(--font-table);
|
||||
|
||||
--d-toolbar-gap: 0.25rem;
|
||||
--d-toolbar-gap: 0.35rem;
|
||||
--d-toolbar-py: 0.25rem;
|
||||
--d-toolbar-mb: 0.25rem;
|
||||
--d-input-py: 0.25rem;
|
||||
--d-input-px: 0.5rem;
|
||||
--d-toolbar-mb: 0.15rem;
|
||||
--d-input-py: 0.2rem;
|
||||
--d-input-px: 0.4rem;
|
||||
--d-input-font: var(--font-table);
|
||||
|
||||
--d-footer-h: 24px;
|
||||
|
||||
Reference in New Issue
Block a user