Compare commits
66 Commits
issue-dedu
...
feat-modul
| Author | SHA1 | Date | |
|---|---|---|---|
| a6267ba3d5 | |||
|
|
c98eed0b13 | ||
|
|
ca71153c15 | ||
|
|
6e49fade8b | ||
|
|
138ce16010 | ||
|
|
690ad73161 | ||
|
|
b8abd8859d | ||
|
|
4fd4013360 | ||
|
|
3adc155b14 | ||
|
|
9d8afa5981 | ||
|
|
f91cf2bc6f | ||
| ef44523ae8 | |||
|
|
ba92dd363c | ||
|
|
c7857fdfc9 | ||
| defb3af56f | |||
|
|
6d7a85cfac | ||
|
|
22c778f8b0 | ||
|
|
ad4224aa8f | ||
|
|
b6ac5133c3 | ||
|
|
2732554cd2 | ||
|
|
df073709ce | ||
|
|
0eb891667b | ||
|
|
1952dea00c | ||
|
|
6becfd82d4 | ||
|
|
671a0aeefe | ||
|
|
f60c25983b | ||
|
|
83e0d6821c | ||
|
|
9a8b3150ff | ||
| 376fa3db31 | |||
|
|
257e3d99ac | ||
|
|
384b137148 | ||
|
|
7c838bdf5e | ||
|
|
c9b081b8f8 | ||
| bc1149d4ba | |||
|
|
07c4aa1c28 | ||
|
|
679b730e74 | ||
|
|
b53ce94274 | ||
| 8316ac085c | |||
|
|
d5f1b4e587 | ||
|
|
f4a1c8004b | ||
|
|
a9614e704e | ||
|
|
289d488469 | ||
|
|
2585305590 | ||
| 65063c9ee7 | |||
|
|
1f7960db50 | ||
|
|
648c659e2b | ||
|
|
d4ea6d2739 | ||
| e20da25405 | |||
|
|
30bb3ee56e | ||
| a517a95912 | |||
| 6f1504021c | |||
| d93770c551 | |||
| 606316204d | |||
| 3d9ef9e99e | |||
| fb13795ef7 | |||
| 1c1cd144dc | |||
| 460b0f37fd | |||
| 73195be6a1 | |||
| 127836f7ce | |||
| a258152175 | |||
| efb3ccdfb5 | |||
| a80e99e500 | |||
| 485675b020 | |||
| a49680b274 | |||
| 32bc00caef | |||
| 2b7a9ae73a |
24
.env.example
24
.env.example
@@ -1,12 +1,26 @@
|
||||
# Silo Environment Configuration
|
||||
# Copy this file to .env and update values as needed
|
||||
# Copy to .env (or deployments/.env) and update values as needed.
|
||||
# For automated setup, run: ./scripts/setup-docker.sh
|
||||
|
||||
# PostgreSQL
|
||||
POSTGRES_PASSWORD=silodev
|
||||
|
||||
# MinIO
|
||||
MINIO_ACCESS_KEY=minioadmin
|
||||
MINIO_SECRET_KEY=minioadmin
|
||||
MINIO_ACCESS_KEY=silominio
|
||||
MINIO_SECRET_KEY=silominiosecret
|
||||
|
||||
# Silo API (optional overrides)
|
||||
# SILO_SERVER_PORT=8080
|
||||
# OpenLDAP
|
||||
LDAP_ADMIN_PASSWORD=ldapadmin
|
||||
LDAP_USERS=siloadmin
|
||||
LDAP_PASSWORDS=siloadmin
|
||||
|
||||
# Silo Authentication
|
||||
SILO_SESSION_SECRET=change-me-in-production
|
||||
SILO_ADMIN_USERNAME=admin
|
||||
SILO_ADMIN_PASSWORD=admin
|
||||
|
||||
# Optional: OIDC (Keycloak)
|
||||
# SILO_OIDC_CLIENT_SECRET=
|
||||
|
||||
# Optional: LDAP service account
|
||||
# SILO_LDAP_BIND_PASSWORD=
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -29,6 +29,7 @@ Thumbs.db
|
||||
# Config with secrets
|
||||
config.yaml
|
||||
*.env
|
||||
deployments/config.docker.yaml
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
|
||||
3
Makefile
3
Makefile
@@ -11,6 +11,7 @@
|
||||
build: web-build
|
||||
go build -o silo ./cmd/silo
|
||||
go build -o silod ./cmd/silod
|
||||
go build -o silorunner ./cmd/silorunner
|
||||
|
||||
# Run the API server locally
|
||||
run:
|
||||
@@ -30,7 +31,7 @@ test-integration:
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
rm -f silo silod
|
||||
rm -f silo silod silorunner
|
||||
rm -f *.out
|
||||
rm -rf web/dist
|
||||
|
||||
|
||||
28
README.md
28
README.md
@@ -25,7 +25,7 @@ silo/
|
||||
│ ├── silo/ # CLI tool
|
||||
│ └── silod/ # API server
|
||||
├── internal/
|
||||
│ ├── api/ # HTTP handlers and routes (75 endpoints)
|
||||
│ ├── api/ # HTTP handlers and routes (78 endpoints)
|
||||
│ ├── auth/ # Authentication (local, LDAP, OIDC)
|
||||
│ ├── config/ # Configuration loading
|
||||
│ ├── db/ # PostgreSQL repositories
|
||||
@@ -53,15 +53,20 @@ silo/
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Docker Compose (quickest)
|
||||
cp config.example.yaml config.yaml
|
||||
# Edit config.yaml with your database, MinIO, and auth settings
|
||||
make docker-up
|
||||
See the **[Installation Guide](docs/INSTALL.md)** for complete setup instructions.
|
||||
|
||||
# Or manual setup
|
||||
psql -h localhost -U silo -d silo -f migrations/*.sql
|
||||
go run ./cmd/silod -config config.yaml
|
||||
**Docker Compose (quickest — includes PostgreSQL, MinIO, OpenLDAP, and Silo):**
|
||||
|
||||
```bash
|
||||
./scripts/setup-docker.sh
|
||||
docker compose -f deployments/docker-compose.allinone.yaml up -d
|
||||
```
|
||||
|
||||
**Development (local Go + Docker services):**
|
||||
|
||||
```bash
|
||||
make docker-up # Start PostgreSQL + MinIO in Docker
|
||||
make run # Run silo locally with Go
|
||||
```
|
||||
|
||||
When auth is enabled, a default admin account is created on first startup using the credentials in `config.yaml` under `auth.local.default_admin_username` and `auth.local.default_admin_password`.
|
||||
@@ -104,15 +109,16 @@ The server provides the REST API and ODS endpoints consumed by these clients.
|
||||
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [docs/INSTALL.md](docs/INSTALL.md) | Installation guide (Docker Compose and daemon) |
|
||||
| [docs/SPECIFICATION.md](docs/SPECIFICATION.md) | Full design specification and API reference |
|
||||
| [docs/STATUS.md](docs/STATUS.md) | Implementation status |
|
||||
| [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) | Production deployment guide |
|
||||
| [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) | Production deployment and operations guide |
|
||||
| [docs/CONFIGURATION.md](docs/CONFIGURATION.md) | Configuration reference (all `config.yaml` options) |
|
||||
| [docs/AUTH.md](docs/AUTH.md) | Authentication system design |
|
||||
| [docs/AUTH_USER_GUIDE.md](docs/AUTH_USER_GUIDE.md) | User guide for login, tokens, and roles |
|
||||
| [docs/GAP_ANALYSIS.md](docs/GAP_ANALYSIS.md) | Gap analysis and revision control roadmap |
|
||||
| [docs/COMPONENT_AUDIT.md](docs/COMPONENT_AUDIT.md) | Component audit tool design |
|
||||
| [ROADMAP.md](ROADMAP.md) | Feature roadmap and SOLIDWORKS PDM comparison |
|
||||
| [docs/ROADMAP.md](docs/ROADMAP.md) | Platform roadmap, dependency tiers, and gap summary |
|
||||
| [frontend-spec.md](frontend-spec.md) | React SPA frontend specification |
|
||||
|
||||
## License
|
||||
|
||||
536
ROADMAP.md
536
ROADMAP.md
@@ -1,536 +0,0 @@
|
||||
# Silo Roadmap
|
||||
|
||||
**Version:** 1.1
|
||||
**Date:** February 2026
|
||||
**Purpose:** Project inventory, SOLIDWORKS PDM gap analysis, and development roadmap
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Executive Summary](#executive-summary)
|
||||
2. [Current Project Inventory](#current-project-inventory)
|
||||
3. [SOLIDWORKS PDM Gap Analysis](#solidworks-pdm-gap-analysis)
|
||||
4. [Feature Roadmap](#feature-roadmap)
|
||||
5. [Implementation Phases](#implementation-phases)
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Silo is an R&D-oriented item database and part management system. It provides configurable part number generation, revision tracking, BOM management, and file versioning through MinIO storage. CAD integration (FreeCAD workbench, LibreOffice Calc extension) is maintained in separate repositories ([silo-mod](https://git.kindred-systems.com/kindred/silo-mod), [silo-calc](https://git.kindred-systems.com/kindred/silo-calc)).
|
||||
|
||||
This document compares Silo's current capabilities against SOLIDWORKS PDM—the industry-leading product data management solution—to identify gaps and prioritize future development.
|
||||
|
||||
### Key Differentiators
|
||||
|
||||
| Aspect | Silo | SOLIDWORKS PDM |
|
||||
|--------|------|----------------|
|
||||
| **Target CAD** | FreeCAD / Kindred Create (open source) | SOLIDWORKS (proprietary) |
|
||||
| **Part Numbering** | Schema-as-configuration (YAML) | Fixed format with some customization |
|
||||
| **Licensing** | Open source / Kindred Proprietary | Commercial ($3,000-$10,000+ per seat) |
|
||||
| **Storage** | PostgreSQL + MinIO (S3-compatible) | SQL Server + File Archive |
|
||||
| **Philosophy** | R&D-oriented, lightweight | Enterprise-grade, comprehensive |
|
||||
|
||||
---
|
||||
|
||||
## Current Project Inventory
|
||||
|
||||
### Implemented Features (MVP Complete)
|
||||
|
||||
#### Core Database System
|
||||
- PostgreSQL schema with 11 migrations
|
||||
- UUID-based identifiers throughout
|
||||
- Soft delete support via `archived_at` timestamps
|
||||
- Atomic sequence generation for part numbers
|
||||
|
||||
#### Part Number Generation
|
||||
- YAML schema parser with validation
|
||||
- Segment types: `string`, `enum`, `serial`, `constant`
|
||||
- Scope templates for serial counters (e.g., `{category}`, `{project}`)
|
||||
- Format templates for custom output
|
||||
|
||||
#### Item Management
|
||||
- Full CRUD operations for items
|
||||
- Item types: part, assembly, drawing, document, tooling, purchased, electrical, software
|
||||
- Custom properties via JSONB storage
|
||||
- Project tagging with many-to-many relationships
|
||||
|
||||
#### Revision Control
|
||||
- Append-only revision history
|
||||
- Revision metadata: properties, file reference, checksum, comment
|
||||
- Status tracking: draft, review, released, obsolete
|
||||
- Labels/tags per revision
|
||||
- Revision comparison (diff)
|
||||
- Rollback functionality
|
||||
|
||||
#### File Management
|
||||
- MinIO integration with versioning
|
||||
- File upload/download via REST API
|
||||
- SHA256 checksums for integrity
|
||||
- Storage path: `items/{partNumber}/rev{N}.FCStd`
|
||||
|
||||
#### Bill of Materials (BOM)
|
||||
- Relationship types: component, alternate, reference
|
||||
- Multi-level BOM (recursive expansion with configurable depth)
|
||||
- Where-used queries (reverse parent lookup)
|
||||
- BOM CSV and ODS export/import with cycle detection
|
||||
- Reference designators for electronics
|
||||
- Quantity tracking with units
|
||||
- Revision-specific child linking
|
||||
|
||||
#### Project Management
|
||||
- Project CRUD operations
|
||||
- Unique project codes (2-10 characters)
|
||||
- Item-to-project tagging
|
||||
- Project-filtered queries
|
||||
|
||||
#### Data Import/Export
|
||||
- CSV export with configurable properties
|
||||
- CSV import with dry-run validation
|
||||
- ODS spreadsheet import/export (items, BOMs, project sheets)
|
||||
- Template generation for import formatting
|
||||
|
||||
#### API & Web Interface
|
||||
- REST API with 75 endpoints
|
||||
- Authentication: local (bcrypt), LDAP/FreeIPA, OIDC/Keycloak
|
||||
- Role-based access control (admin > editor > viewer)
|
||||
- API token management (SHA-256 hashed)
|
||||
- Session management (PostgreSQL-backed, 24h lifetime)
|
||||
- CSRF protection (nosurf on web forms)
|
||||
- Middleware: logging, CORS, recovery, request ID
|
||||
- Web UI — React SPA (Vite + TypeScript, Catppuccin Mocha theme)
|
||||
- Fuzzy search
|
||||
- Health and readiness probes
|
||||
|
||||
#### Audit & Completeness
|
||||
- Audit logging (database table with user/action/resource tracking)
|
||||
- Item completeness scoring with weighted fields
|
||||
- Category-specific property validation
|
||||
- Tier classification (critical/low/partial/good/complete)
|
||||
|
||||
#### Configuration
|
||||
- YAML configuration with environment variable overrides
|
||||
- Multi-schema support
|
||||
- Docker Compose deployment ready
|
||||
|
||||
### Partially Implemented
|
||||
|
||||
| Feature | Status | Notes |
|
||||
|---------|--------|-------|
|
||||
| Odoo ERP integration | Partial | Config and sync-log CRUD functional; push/pull sync operations are stubs |
|
||||
| Date segment type | Not started | Schema parser placeholder exists |
|
||||
| Part number validation | Not started | API accepts but doesn't validate format |
|
||||
| Location hierarchy CRUD | Schema only | Tables exist, no API endpoints |
|
||||
| Inventory tracking | Schema only | Tables exist, no API endpoints |
|
||||
| Unit tests | Partial | 9 Go test files across api, db, ods, partnum, schema packages |
|
||||
|
||||
### Infrastructure Status
|
||||
|
||||
| Component | Status |
|
||||
|-----------|--------|
|
||||
| PostgreSQL | Running (psql.kindred.internal) |
|
||||
| MinIO | Configured in Docker Compose |
|
||||
| Silo API Server | Builds successfully |
|
||||
| Docker Compose | Complete (dev and production) |
|
||||
| systemd service | Unit file and env template ready |
|
||||
| Deployment scripts | setup-host, deploy, init-db, setup-ipa-nginx |
|
||||
|
||||
---
|
||||
|
||||
## SOLIDWORKS PDM Gap Analysis
|
||||
|
||||
This section compares Silo's capabilities against SOLIDWORKS PDM features. Gaps are categorized by priority and implementation complexity.
|
||||
|
||||
### Legend
|
||||
- **Silo Status:** Full / Partial / None
|
||||
- **Priority:** Critical / High / Medium / Low
|
||||
- **Complexity:** Simple / Moderate / Complex
|
||||
|
||||
---
|
||||
|
||||
### 1. Version Control & Revision Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Check-in/check-out | Full pessimistic locking | None | High | Moderate |
|
||||
| Version history | Complete with branching | Full (linear) | - | - |
|
||||
| Revision labels | A, B, C or custom schemes | Full (custom labels) | - | - |
|
||||
| Rollback/restore | Full | Full | - | - |
|
||||
| Compare revisions | Visual + metadata diff | Metadata diff only | Medium | Complex |
|
||||
| Get Latest Revision | One-click retrieval | Partial (API only) | Medium | Simple |
|
||||
|
||||
**Gap Analysis:**
|
||||
Silo lacks pessimistic locking (check-out), which is critical for multi-user CAD environments where file merging is impractical. Visual diff comparison would require FreeCAD integration for CAD file visualization.
|
||||
|
||||
---
|
||||
|
||||
### 2. Workflow Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Custom workflows | Full visual designer | None | Critical | Complex |
|
||||
| State transitions | Configurable with permissions | Basic (status field only) | Critical | Complex |
|
||||
| Parallel approvals | Multiple approvers required | None | High | Complex |
|
||||
| Automatic transitions | Timer/condition-based | None | Medium | Moderate |
|
||||
| Email notifications | On state change | None | High | Moderate |
|
||||
| ECO process | Built-in change management | None | High | Complex |
|
||||
| Child state conditions | Block parent if children invalid | None | Medium | Moderate |
|
||||
|
||||
**Gap Analysis:**
|
||||
Workflow management is the largest functional gap. SOLIDWORKS PDM offers sophisticated state machines with parallel approvals, automatic transitions, and deep integration with engineering change processes. Silo currently has only a simple status field (draft/review/released/obsolete) with no transition rules or approval processes.
|
||||
|
||||
---
|
||||
|
||||
### 3. User Management & Security
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| User authentication | Windows AD, LDAP | Full (local, LDAP, OIDC) | - | - |
|
||||
| Role-based permissions | Granular per folder/state | Partial (3-tier role model) | Medium | Moderate |
|
||||
| Group management | Full | None | Medium | Moderate |
|
||||
| Folder permissions | Read/write/delete per folder | None | Medium | Moderate |
|
||||
| State permissions | Actions allowed per state | None | High | Moderate |
|
||||
| Audit trail | Complete action logging | Full | - | - |
|
||||
| Private files | Pre-check-in visibility control | None | Low | Simple |
|
||||
|
||||
**Gap Analysis:**
|
||||
Authentication is implemented with three backends (local, LDAP/FreeIPA, OIDC/Keycloak) and a 3-tier role model (admin > editor > viewer). Audit logging captures user actions. Remaining gaps: group management, folder-level permissions, and state-based permission rules.
|
||||
|
||||
---
|
||||
|
||||
### 4. Search & Discovery
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Metadata search | Full with custom cards | Partial (API query params + fuzzy) | High | Moderate |
|
||||
| Full-text content search | iFilters for Office, CAD | None | Medium | Complex |
|
||||
| Quick search | Toolbar with history | Partial (fuzzy search API) | Medium | Simple |
|
||||
| Saved searches | User-defined favorites | None | Medium | Simple |
|
||||
| Advanced operators | AND, OR, NOT, wildcards | None | Medium | Simple |
|
||||
| Multi-variable search | Search across multiple fields | None | Medium | Simple |
|
||||
| Where-used search | Find all assemblies using part | Full | - | - |
|
||||
|
||||
**Gap Analysis:**
|
||||
Silo has API-level filtering, fuzzy search, and where-used queries. Remaining gaps: saved searches, advanced search operators, and a richer search UI. Content search (searching within CAD files) is not planned for the server.
|
||||
|
||||
---
|
||||
|
||||
### 5. BOM Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Single-level BOM | Yes | Full | - | - |
|
||||
| Multi-level BOM | Indented/exploded views | Full (recursive, configurable depth) | - | - |
|
||||
| BOM comparison | Between revisions | None | Medium | Moderate |
|
||||
| BOM export | Excel, XML, ERP formats | Full (CSV, ODS) | - | - |
|
||||
| BOM import | Bulk BOM loading | Full (CSV with upsert) | - | - |
|
||||
| Calculated BOMs | Quantities rolled up | None | Medium | Moderate |
|
||||
| Reference designators | Full support | Full | - | - |
|
||||
| Alternate parts | Substitute tracking | Full | - | - |
|
||||
|
||||
**Gap Analysis:**
|
||||
Multi-level BOM retrieval (recursive CTE with configurable depth) and BOM export (CSV, ODS) are implemented. BOM import supports CSV with upsert and cycle detection. Remaining gap: BOM comparison between revisions.
|
||||
|
||||
---
|
||||
|
||||
### 6. CAD Integration
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Native CAD add-in | Deep SOLIDWORKS integration | FreeCAD workbench (silo-mod) | Medium | Complex |
|
||||
| Property mapping | Bi-directional sync | Planned (silo-mod) | Medium | Moderate |
|
||||
| Task pane | Embedded in CAD UI | Auth dock panel (silo-mod) | Medium | Complex |
|
||||
| Lightweight components | Handle without full load | N/A | - | - |
|
||||
| Drawing/model linking | Automatic association | Manual | Medium | Moderate |
|
||||
| Multi-CAD support | Third-party formats | FreeCAD only | Low | - |
|
||||
|
||||
**Gap Analysis:**
|
||||
CAD integration is maintained in separate repositories ([silo-mod](https://git.kindred-systems.com/kindred/silo-mod), [silo-calc](https://git.kindred-systems.com/kindred/silo-calc)). The Silo server provides the REST API endpoints consumed by those clients.
|
||||
|
||||
---
|
||||
|
||||
### 7. External Integrations
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| ERP integration | SAP, Dynamics, etc. | Partial (Odoo stubs) | Medium | Complex |
|
||||
| API access | Full COM/REST API | Full REST API (75 endpoints) | - | - |
|
||||
| Dispatch scripts | Automation without coding | None | Medium | Moderate |
|
||||
| Task scheduler | Background processing | None | Medium | Moderate |
|
||||
| Email system | SMTP integration | None | High | Simple |
|
||||
| Web portal | Browser access | Full (React SPA + auth) | - | - |
|
||||
|
||||
**Gap Analysis:**
|
||||
Silo has a comprehensive REST API (75 endpoints) and a full web UI with authentication. Odoo ERP integration has config/sync-log scaffolding but push/pull operations are stubs. Remaining gaps: email notifications, task scheduler, dispatch automation.
|
||||
|
||||
---
|
||||
|
||||
### 8. Reporting & Analytics
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Standard reports | Inventory, usage, activity | None | Medium | Moderate |
|
||||
| Custom reports | User-defined queries | None | Medium | Moderate |
|
||||
| Dashboard | Visual KPIs | None | Low | Moderate |
|
||||
| Export formats | PDF, Excel, CSV | CSV and ODS | Medium | Simple |
|
||||
|
||||
**Gap Analysis:**
|
||||
Reporting capabilities are absent. Basic reports (item counts, revision activity, where-used) would provide immediate value.
|
||||
|
||||
---
|
||||
|
||||
### 9. File Handling
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| File versioning | Automatic | Full (MinIO) | - | - |
|
||||
| File preview | Thumbnails, 3D preview | None | Medium | Complex |
|
||||
| File conversion | PDF, DXF generation | None | Medium | Complex |
|
||||
| Replication | Multi-site sync | None | Low | Complex |
|
||||
| File copy with refs | Copy tree with references | None | Medium | Moderate |
|
||||
|
||||
**Gap Analysis:**
|
||||
File storage works well. Thumbnail generation and file preview would significantly improve the web UI experience. Automatic conversion to PDF/DXF is valuable for sharing with non-CAD users.
|
||||
|
||||
---
|
||||
|
||||
### Gap Summary by Priority
|
||||
|
||||
#### Completed (Previously Critical/High)
|
||||
1. ~~**User authentication**~~ - Implemented: local, LDAP, OIDC
|
||||
2. ~~**Role-based permissions**~~ - Implemented: 3-tier role model (admin/editor/viewer)
|
||||
3. ~~**Audit trail**~~ - Implemented: audit_log table with completeness scoring
|
||||
4. ~~**Where-used search**~~ - Implemented: reverse parent lookup API
|
||||
5. ~~**Multi-level BOM API**~~ - Implemented: recursive expansion with configurable depth
|
||||
6. ~~**BOM export**~~ - Implemented: CSV and ODS formats
|
||||
|
||||
#### Critical Gaps (Required for Team Use)
|
||||
1. **Workflow engine** - State machines with transitions and approvals
|
||||
2. **Check-out locking** - Pessimistic locking for CAD files
|
||||
|
||||
#### High Priority Gaps (Significant Value)
|
||||
1. **Email notifications** - Alert users on state changes
|
||||
2. **Web UI search** - Advanced search interface with saved searches
|
||||
3. **Folder/state permissions** - Granular access control beyond role model
|
||||
|
||||
#### Medium Priority Gaps (Nice to Have)
|
||||
1. **Saved searches** - Frequently used queries
|
||||
2. **File preview/thumbnails** - Visual browsing
|
||||
3. **Reporting** - Activity and inventory reports
|
||||
4. **Scheduled tasks** - Background automation
|
||||
5. **BOM comparison** - Revision diff for assemblies
|
||||
|
||||
---
|
||||
|
||||
## Feature Roadmap
|
||||
|
||||
### Phase 1: Foundation (Current - Q2 2026)
|
||||
*Complete MVP and stabilize core functionality*
|
||||
|
||||
| Feature | Description | Status |
|
||||
|---------|-------------|--------|
|
||||
| MinIO integration | File upload/download with versioning and checksums | Complete |
|
||||
| Revision control | Rollback, comparison, status/labels | Complete |
|
||||
| CSV import/export | Dry-run validation, template generation | Complete |
|
||||
| ODS import/export | Items, BOMs, project sheets, templates | Complete |
|
||||
| Project management | CRUD, many-to-many item tagging | Complete |
|
||||
| Multi-level BOM | Recursive expansion, where-used, export | Complete |
|
||||
| Authentication | Local, LDAP, OIDC with role-based access | Complete |
|
||||
| Audit logging | Action logging, completeness scoring | Complete |
|
||||
| Unit tests | Core API and database operations | Not Started |
|
||||
| Date segment type | Support date-based part number segments | Not Started |
|
||||
| Part number validation | Validate format on creation | Not Started |
|
||||
| Location CRUD API | Expose location hierarchy via REST | Not Started |
|
||||
| Inventory API | Expose inventory operations via REST | Not Started |
|
||||
|
||||
### Phase 2: Multi-User (Q2-Q3 2026)
|
||||
*Enable team collaboration*
|
||||
|
||||
| Feature | Description | Status |
|
||||
|---------|-------------|--------|
|
||||
| LDAP authentication | Integrate with FreeIPA/Active Directory | **Complete** |
|
||||
| OIDC authentication | Keycloak / OpenID Connect | **Complete** |
|
||||
| Audit logging | Record all user actions with timestamps | **Complete** |
|
||||
| Session management | Token-based and session-based API authentication | **Complete** |
|
||||
| User/group management | Create, assign, manage users and groups | Not Started |
|
||||
| Folder permissions | Read/write/delete per folder hierarchy | Not Started |
|
||||
| Check-out locking | Pessimistic locks with timeout | Not Started |
|
||||
|
||||
### Phase 3: Workflow Engine (Q3-Q4 2026)
|
||||
*Implement engineering change processes*
|
||||
|
||||
| Feature | Description | Complexity |
|
||||
|---------|-------------|------------|
|
||||
| Workflow designer | YAML-defined state machines | Complex |
|
||||
| State transitions | Configurable transition rules | Complex |
|
||||
| Transition permissions | Who can execute which transitions | Moderate |
|
||||
| Single approvals | Basic approval workflow | Moderate |
|
||||
| Parallel approvals | Multi-approver gates | Complex |
|
||||
| Automatic transitions | Timer and condition-based | Complex |
|
||||
| Email notifications | SMTP integration for alerts | Simple |
|
||||
| Child state conditions | Block parent transitions | Moderate |
|
||||
|
||||
### Phase 4: Search & Discovery (Q4 2026 - Q1 2027)
|
||||
*Improve findability and navigation*
|
||||
|
||||
| Feature | Description | Status |
|
||||
|---------|-------------|--------|
|
||||
| Where-used queries | Find parent assemblies | **Complete** |
|
||||
| Fuzzy search | Quick search across items | **Complete** |
|
||||
| Advanced search UI | Web interface with filters | Not Started |
|
||||
| Search operators | AND, OR, NOT, wildcards | Not Started |
|
||||
| Saved searches | User favorites | Not Started |
|
||||
| Content search | Search within file content | Not Started |
|
||||
|
||||
### Phase 5: BOM & Reporting (Q1-Q2 2027)
|
||||
*Enhanced BOM management and analytics*
|
||||
|
||||
| Feature | Description | Status |
|
||||
|---------|-------------|--------|
|
||||
| Multi-level BOM API | Recursive assembly retrieval | **Complete** |
|
||||
| BOM export | CSV and ODS formats | **Complete** |
|
||||
| BOM import | CSV with upsert and cycle detection | **Complete** |
|
||||
| BOM comparison | Diff between revisions | Not Started |
|
||||
| Standard reports | Activity, inventory, usage | Not Started |
|
||||
| Custom queries | User-defined report builder | Not Started |
|
||||
| Dashboard | Visual KPIs and metrics | Not Started |
|
||||
|
||||
### Phase 6: Advanced Features (Q2-Q4 2027)
|
||||
*Enterprise capabilities*
|
||||
|
||||
| Feature | Description | Complexity |
|
||||
|---------|-------------|------------|
|
||||
| File preview | Thumbnail generation | Complex |
|
||||
| File conversion | Auto-generate PDF/DXF | Complex |
|
||||
| ERP integration | Adapter framework | Complex |
|
||||
| Task scheduler | Background job processing | Moderate |
|
||||
| Webhooks | Event notifications to external systems | Moderate |
|
||||
| API rate limiting | Protect against abuse | Simple |
|
||||
|
||||
---
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1 Detailed Tasks
|
||||
|
||||
#### 1.1 MinIO Integration -- COMPLETE
|
||||
- [x] MinIO service configured in Docker Compose
|
||||
- [x] File upload via REST API
|
||||
- [x] File download via REST API (latest and by revision)
|
||||
- [x] SHA256 checksums on upload
|
||||
|
||||
#### 1.2 Authentication & Authorization -- COMPLETE
|
||||
- [x] Local authentication (bcrypt)
|
||||
- [x] LDAP/FreeIPA authentication
|
||||
- [x] OIDC/Keycloak authentication
|
||||
- [x] Role-based access control (admin/editor/viewer)
|
||||
- [x] API token management (SHA-256 hashed)
|
||||
- [x] Session management (PostgreSQL-backed)
|
||||
- [x] CSRF protection (nosurf)
|
||||
- [x] Audit logging (database table)
|
||||
|
||||
#### 1.3 Multi-level BOM & Export -- COMPLETE
|
||||
- [x] Recursive BOM expansion with configurable depth
|
||||
- [x] Where-used reverse lookup
|
||||
- [x] BOM CSV export/import with cycle detection
|
||||
- [x] BOM ODS export
|
||||
- [x] ODS item export/import/template
|
||||
|
||||
#### 1.4 Unit Test Suite
|
||||
- [ ] Database connection and transaction tests
|
||||
- [ ] Item CRUD operation tests
|
||||
- [ ] Revision creation and retrieval tests
|
||||
- [ ] Part number generation tests
|
||||
- [ ] File upload/download tests
|
||||
- [ ] CSV import/export tests
|
||||
- [ ] API endpoint tests
|
||||
|
||||
#### 1.5 Missing Segment Types
|
||||
- [ ] Implement date segment type
|
||||
- [ ] Add strftime-style format support
|
||||
|
||||
#### 1.6 Location & Inventory APIs
|
||||
- [ ] `GET /api/locations` - List locations
|
||||
- [ ] `POST /api/locations` - Create location
|
||||
- [ ] `GET /api/locations/{path}` - Get location
|
||||
- [ ] `DELETE /api/locations/{path}` - Delete location
|
||||
- [ ] `GET /api/inventory/{partNumber}` - Get inventory
|
||||
- [ ] `POST /api/inventory/{partNumber}/adjust` - Adjust quantity
|
||||
- [ ] `POST /api/inventory/{partNumber}/move` - Move between locations
|
||||
|
||||
---
|
||||
|
||||
## Success Metrics
|
||||
|
||||
### Phase 1 (Foundation)
|
||||
- All existing tests pass
|
||||
- File upload/download works end-to-end
|
||||
- FreeCAD users can check out, modify, and commit parts
|
||||
|
||||
### Phase 2 (Multi-User)
|
||||
- 5+ concurrent users supported
|
||||
- No data corruption under concurrent access
|
||||
- Audit log captures all modifications
|
||||
|
||||
### Phase 3 (Workflow)
|
||||
- Engineering change process completable in Silo
|
||||
- Email notifications delivered reliably
|
||||
- Workflow state visible in web UI
|
||||
|
||||
### Phase 4+ (Advanced)
|
||||
- Search returns results in <2 seconds
|
||||
- Where-used queries complete in <5 seconds
|
||||
- BOM export matches assembly structure
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
### SOLIDWORKS PDM Documentation
|
||||
- [SOLIDWORKS PDM Product Page](https://www.solidworks.com/product/solidworks-pdm)
|
||||
- [What's New in SOLIDWORKS PDM 2025](https://blogs.solidworks.com/solidworksblog/2024/10/whats-new-in-solidworks-pdm-2025.html)
|
||||
- [Top 5 Enhancements in SOLIDWORKS PDM 2024](https://blogs.solidworks.com/solidworksblog/2023/10/top-5-enhancements-in-solidworks-pdm-2024.html)
|
||||
- [SOLIDWORKS PDM Workflow Transitions](https://help.solidworks.com/2023/english/EnterprisePDM/Admin/c_workflow_transition.htm)
|
||||
- [Ultimate Guide to SOLIDWORKS PDM Permissions](https://www.goengineer.com/blog/ultimate-guide-to-solidworks-pdm-permissions)
|
||||
- [Searching in SOLIDWORKS PDM](https://help.solidworks.com/2021/english/EnterprisePDM/fileexplorer/c_searches.htm)
|
||||
- [SOLIDWORKS PDM API Getting Started](https://3dswym.3dexperience.3ds.com/wiki/solidworks-news-info/getting-started-with-the-solidworks-pdm-api-solidpractices_gBCYaM75RgORBcpSO1m_Mw)
|
||||
|
||||
### Silo Documentation
|
||||
- [Specification](docs/SPECIFICATION.md)
|
||||
- [Development Status](docs/STATUS.md)
|
||||
- [Deployment Guide](docs/DEPLOYMENT.md)
|
||||
- [Gap Analysis](docs/GAP_ANALYSIS.md)
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Feature Comparison Matrix
|
||||
|
||||
| Category | Feature | SW PDM Standard | SW PDM Pro | Silo Current | Silo Planned |
|
||||
|----------|---------|-----------------|------------|--------------|--------------|
|
||||
| **Version Control** | Check-in/out | Yes | Yes | No | Phase 2 |
|
||||
| | Version history | Yes | Yes | Yes | - |
|
||||
| | Rollback | Yes | Yes | Yes | - |
|
||||
| | Revision labels/status | Yes | Yes | Yes | - |
|
||||
| | Revision comparison | Yes | Yes | Yes (metadata) | - |
|
||||
| **Workflow** | Custom workflows | Limited | Yes | No | Phase 3 |
|
||||
| | Parallel approval | No | Yes | No | Phase 3 |
|
||||
| | Notifications | No | Yes | No | Phase 3 |
|
||||
| **Security** | User auth | Windows | Windows/LDAP | Yes (local, LDAP, OIDC) | - |
|
||||
| | Permissions | Basic | Granular | Partial (role-based) | Phase 2 |
|
||||
| | Audit trail | Basic | Full | Yes | - |
|
||||
| **Search** | Metadata search | Yes | Yes | Partial (API + fuzzy) | Phase 4 |
|
||||
| | Content search | No | Yes | No | Phase 4 |
|
||||
| | Where-used | Yes | Yes | Yes | - |
|
||||
| **BOM** | Single-level | Yes | Yes | Yes | - |
|
||||
| | Multi-level | Yes | Yes | Yes (recursive) | - |
|
||||
| | BOM export | Yes | Yes | Yes (CSV, ODS) | - |
|
||||
| **Data** | CSV import/export | Yes | Yes | Yes | - |
|
||||
| | ODS import/export | No | No | Yes | - |
|
||||
| | Project management | Yes | Yes | Yes | - |
|
||||
| **Integration** | API | Limited | Full | Full REST (75) | - |
|
||||
| | ERP connectors | No | Yes | Partial (Odoo stubs) | Phase 6 |
|
||||
| | Web access | No | Yes | Yes (React SPA + auth) | - |
|
||||
| **Files** | Versioning | Yes | Yes | Yes | - |
|
||||
| | Preview | Yes | Yes | No | Phase 6 |
|
||||
| | Multi-site | No | Yes | No | Not Planned |
|
||||
@@ -66,7 +66,7 @@ Token subcommands:
|
||||
silo token revoke <id> Revoke a token
|
||||
|
||||
Environment variables for API access:
|
||||
SILO_API_URL Base URL of the Silo server (e.g., https://silo.kindred.internal)
|
||||
SILO_API_URL Base URL of the Silo server (e.g., https://silo.example.internal)
|
||||
SILO_API_TOKEN API token for authentication
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -13,10 +14,13 @@ import (
|
||||
|
||||
"github.com/alexedwards/scs/pgxstore"
|
||||
"github.com/alexedwards/scs/v2"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/api"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/storage"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -178,6 +182,54 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
// Load job definitions (optional — directory may not exist yet)
|
||||
var jobDefs map[string]*jobdef.Definition
|
||||
if _, err := os.Stat(cfg.Jobs.Directory); err == nil {
|
||||
jobDefs, err = jobdef.LoadAll(cfg.Jobs.Directory)
|
||||
if err != nil {
|
||||
logger.Fatal().Err(err).Str("directory", cfg.Jobs.Directory).Msg("failed to load job definitions")
|
||||
}
|
||||
logger.Info().Int("count", len(jobDefs)).Msg("loaded job definitions")
|
||||
} else {
|
||||
jobDefs = make(map[string]*jobdef.Definition)
|
||||
logger.Info().Str("directory", cfg.Jobs.Directory).Msg("job definitions directory not found, skipping")
|
||||
}
|
||||
|
||||
// Upsert job definitions into database
|
||||
jobRepo := db.NewJobRepository(database)
|
||||
for _, def := range jobDefs {
|
||||
defJSON, _ := json.Marshal(def)
|
||||
var defMap map[string]any
|
||||
json.Unmarshal(defJSON, &defMap)
|
||||
|
||||
rec := &db.JobDefinitionRecord{
|
||||
Name: def.Name,
|
||||
Version: def.Version,
|
||||
TriggerType: def.Trigger.Type,
|
||||
ScopeType: def.Scope.Type,
|
||||
ComputeType: def.Compute.Type,
|
||||
RunnerTags: def.Runner.Tags,
|
||||
TimeoutSeconds: def.Timeout,
|
||||
MaxRetries: def.MaxRetries,
|
||||
Priority: def.Priority,
|
||||
Definition: defMap,
|
||||
Enabled: true,
|
||||
}
|
||||
if err := jobRepo.UpsertDefinition(ctx, rec); err != nil {
|
||||
logger.Fatal().Err(err).Str("name", def.Name).Msg("failed to upsert job definition")
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize module registry
|
||||
registry := modules.NewRegistry()
|
||||
if err := modules.LoadState(registry, cfg, database.Pool()); err != nil {
|
||||
logger.Fatal().Err(err).Msg("failed to load module state")
|
||||
}
|
||||
for _, m := range registry.All() {
|
||||
logger.Info().Str("module", m.ID).Bool("enabled", registry.IsEnabled(m.ID)).
|
||||
Bool("required", m.Required).Msg("module")
|
||||
}
|
||||
|
||||
// Create SSE broker and server state
|
||||
broker := api.NewBroker(logger)
|
||||
serverState := api.NewServerState(logger, store, broker)
|
||||
@@ -190,9 +242,32 @@ func main() {
|
||||
|
||||
// Create API server
|
||||
server := api.NewServer(logger, database, schemas, cfg.Schemas.Directory, store,
|
||||
authService, sessionManager, oidcBackend, &cfg.Auth, broker, serverState)
|
||||
authService, sessionManager, oidcBackend, &cfg.Auth, broker, serverState,
|
||||
jobDefs, cfg.Jobs.Directory, registry, cfg)
|
||||
router := api.NewRouter(server, logger)
|
||||
|
||||
// Start background sweepers for job/runner timeouts (only when jobs module enabled)
|
||||
if registry.IsEnabled(modules.Jobs) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(time.Duration(cfg.Jobs.JobTimeoutCheck) * time.Second)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
if n, err := jobRepo.TimeoutExpiredJobs(ctx); err != nil {
|
||||
logger.Error().Err(err).Msg("job timeout sweep failed")
|
||||
} else if n > 0 {
|
||||
logger.Info().Int64("count", n).Msg("timed out expired jobs")
|
||||
}
|
||||
|
||||
if n, err := jobRepo.ExpireStaleRunners(ctx, time.Duration(cfg.Jobs.RunnerTimeout)*time.Second); err != nil {
|
||||
logger.Error().Err(err).Msg("runner expiry sweep failed")
|
||||
} else if n > 0 {
|
||||
logger.Info().Int64("count", n).Msg("expired stale runners")
|
||||
}
|
||||
}
|
||||
}()
|
||||
logger.Info().Msg("job/runner sweepers started")
|
||||
}
|
||||
|
||||
// Create HTTP server
|
||||
addr := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
|
||||
httpServer := &http.Server{
|
||||
|
||||
330
cmd/silorunner/main.go
Normal file
330
cmd/silorunner/main.go
Normal file
@@ -0,0 +1,330 @@
|
||||
// Command silorunner is a compute worker that polls the Silo server for jobs
|
||||
// and executes them using Headless Create with silo-mod installed.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// RunnerConfig holds runner configuration.
|
||||
type RunnerConfig struct {
|
||||
ServerURL string `yaml:"server_url"`
|
||||
Token string `yaml:"token"`
|
||||
Name string `yaml:"name"`
|
||||
Tags []string `yaml:"tags"`
|
||||
PollInterval int `yaml:"poll_interval"` // seconds, default 5
|
||||
CreatePath string `yaml:"create_path"` // path to Headless Create binary
|
||||
}
|
||||
|
||||
func main() {
|
||||
configPath := flag.String("config", "runner.yaml", "Path to runner config file")
|
||||
flag.Parse()
|
||||
|
||||
logger := zerolog.New(os.Stdout).With().Timestamp().Str("component", "silorunner").Logger()
|
||||
|
||||
// Load config
|
||||
cfg, err := loadConfig(*configPath)
|
||||
if err != nil {
|
||||
logger.Fatal().Err(err).Msg("failed to load config")
|
||||
}
|
||||
|
||||
if cfg.ServerURL == "" {
|
||||
logger.Fatal().Msg("server_url is required")
|
||||
}
|
||||
if cfg.Token == "" {
|
||||
logger.Fatal().Msg("token is required")
|
||||
}
|
||||
if cfg.Name == "" {
|
||||
hostname, _ := os.Hostname()
|
||||
cfg.Name = "runner-" + hostname
|
||||
}
|
||||
if cfg.PollInterval <= 0 {
|
||||
cfg.PollInterval = 5
|
||||
}
|
||||
|
||||
logger.Info().
|
||||
Str("server", cfg.ServerURL).
|
||||
Str("name", cfg.Name).
|
||||
Strs("tags", cfg.Tags).
|
||||
Int("poll_interval", cfg.PollInterval).
|
||||
Msg("starting runner")
|
||||
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
// Graceful shutdown
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Heartbeat goroutine
|
||||
go func() {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := heartbeat(client, cfg); err != nil {
|
||||
logger.Error().Err(err).Msg("heartbeat failed")
|
||||
}
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Initial heartbeat
|
||||
if err := heartbeat(client, cfg); err != nil {
|
||||
logger.Warn().Err(err).Msg("initial heartbeat failed")
|
||||
}
|
||||
|
||||
// Poll loop
|
||||
ticker := time.NewTicker(time.Duration(cfg.PollInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
job, definition, err := claimJob(client, cfg)
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("claim failed")
|
||||
continue
|
||||
}
|
||||
if job == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
jobID, _ := job["id"].(string)
|
||||
defName, _ := job["definition_name"].(string)
|
||||
logger.Info().Str("job_id", jobID).Str("definition", defName).Msg("claimed job")
|
||||
|
||||
// Start the job
|
||||
if err := startJob(client, cfg, jobID); err != nil {
|
||||
logger.Error().Err(err).Str("job_id", jobID).Msg("failed to start job")
|
||||
continue
|
||||
}
|
||||
|
||||
// Execute the job
|
||||
executeJob(logger, client, cfg, jobID, job, definition)
|
||||
|
||||
case <-quit:
|
||||
logger.Info().Msg("shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*RunnerConfig, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
data = []byte(os.ExpandEnv(string(data)))
|
||||
|
||||
var cfg RunnerConfig
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parsing config: %w", err)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func heartbeat(client *http.Client, cfg *RunnerConfig) error {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/heartbeat", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("heartbeat: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func claimJob(client *http.Client, cfg *RunnerConfig) (map[string]any, map[string]any, error) {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/claim", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNoContent {
|
||||
return nil, nil, nil // No jobs available
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, nil, fmt.Errorf("claim: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Job map[string]any `json:"job"`
|
||||
Definition map[string]any `json:"definition"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding claim response: %w", err)
|
||||
}
|
||||
|
||||
return result.Job, result.Definition, nil
|
||||
}
|
||||
|
||||
func startJob(client *http.Client, cfg *RunnerConfig, jobID string) error {
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/start", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("start: %d %s", resp.StatusCode, string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func reportProgress(client *http.Client, cfg *RunnerConfig, jobID string, progress int, message string) {
|
||||
body, _ := json.Marshal(map[string]any{
|
||||
"progress": progress,
|
||||
"message": message,
|
||||
})
|
||||
req, _ := http.NewRequest("PUT", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/progress", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
func completeJob(client *http.Client, cfg *RunnerConfig, jobID string, result map[string]any) error {
|
||||
body, _ := json.Marshal(map[string]any{"result": result})
|
||||
req, err := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/complete", bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
respBody, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("complete: %d %s", resp.StatusCode, string(respBody))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func failJob(client *http.Client, cfg *RunnerConfig, jobID string, errMsg string) {
|
||||
body, _ := json.Marshal(map[string]string{"error": errMsg})
|
||||
req, _ := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/fail", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
func appendLog(client *http.Client, cfg *RunnerConfig, jobID, level, message string) {
|
||||
body, _ := json.Marshal(map[string]string{
|
||||
"level": level,
|
||||
"message": message,
|
||||
})
|
||||
req, _ := http.NewRequest("POST", cfg.ServerURL+"/api/runner/jobs/"+jobID+"/log", bytes.NewReader(body))
|
||||
req.Header.Set("Authorization", "Bearer "+cfg.Token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
// executeJob dispatches the job based on its compute command.
|
||||
// For now, this is a stub that demonstrates the lifecycle.
|
||||
// Real execution will shell out to Headless Create with silo-mod.
|
||||
func executeJob(logger zerolog.Logger, client *http.Client, cfg *RunnerConfig, jobID string, job, definition map[string]any) {
|
||||
defName, _ := job["definition_name"].(string)
|
||||
|
||||
// Extract compute config from definition
|
||||
var command string
|
||||
if definition != nil {
|
||||
if compute, ok := definition["compute"].(map[string]any); ok {
|
||||
command, _ = compute["command"].(string)
|
||||
}
|
||||
}
|
||||
|
||||
appendLog(client, cfg, jobID, "info", fmt.Sprintf("starting execution: %s (command: %s)", defName, command))
|
||||
reportProgress(client, cfg, jobID, 10, "preparing")
|
||||
|
||||
switch command {
|
||||
case "create-validate", "create-export", "create-dag-extract", "create-thumbnail":
|
||||
if cfg.CreatePath == "" {
|
||||
failJob(client, cfg, jobID, "create_path not configured")
|
||||
return
|
||||
}
|
||||
|
||||
appendLog(client, cfg, jobID, "info", fmt.Sprintf("would execute: %s --console with silo-mod", cfg.CreatePath))
|
||||
reportProgress(client, cfg, jobID, 50, "executing")
|
||||
|
||||
// TODO: Actual Create execution:
|
||||
// 1. Download item file from Silo API
|
||||
// 2. Shell out: create --console -e "from silo.runner import <entry>; <entry>(...)"
|
||||
// 3. Parse output JSON
|
||||
// 4. Upload results / sync DAG
|
||||
// For now, complete with a placeholder result.
|
||||
|
||||
reportProgress(client, cfg, jobID, 90, "finalizing")
|
||||
|
||||
if err := completeJob(client, cfg, jobID, map[string]any{
|
||||
"status": "placeholder",
|
||||
"message": "Create execution not yet implemented - runner lifecycle verified",
|
||||
"command": command,
|
||||
}); err != nil {
|
||||
logger.Error().Err(err).Str("job_id", jobID).Msg("failed to complete job")
|
||||
} else {
|
||||
logger.Info().Str("job_id", jobID).Msg("job completed (placeholder)")
|
||||
}
|
||||
|
||||
default:
|
||||
failJob(client, cfg, jobID, fmt.Sprintf("unknown compute command: %s", command))
|
||||
logger.Warn().Str("job_id", jobID).Str("command", command).Msg("unknown compute command")
|
||||
}
|
||||
}
|
||||
@@ -8,20 +8,20 @@ server:
|
||||
# read_only: false # Reject all write operations; toggle at runtime with SIGUSR1
|
||||
|
||||
database:
|
||||
host: "psql.kindred.internal"
|
||||
host: "localhost" # Use "postgres" for Docker Compose
|
||||
port: 5432
|
||||
name: "silo"
|
||||
user: "silo"
|
||||
password: "" # Use SILO_DB_PASSWORD env var
|
||||
sslmode: "require"
|
||||
sslmode: "require" # Use "disable" for Docker Compose (internal network)
|
||||
max_connections: 10
|
||||
|
||||
storage:
|
||||
endpoint: "minio.kindred.internal:9000"
|
||||
endpoint: "localhost:9000" # Use "minio:9000" for Docker Compose
|
||||
access_key: "" # Use SILO_MINIO_ACCESS_KEY env var
|
||||
secret_key: "" # Use SILO_MINIO_SECRET_KEY env var
|
||||
bucket: "silo-files"
|
||||
use_ssl: true
|
||||
use_ssl: true # Use false for Docker Compose (internal network)
|
||||
region: "us-east-1"
|
||||
|
||||
schemas:
|
||||
@@ -53,7 +53,7 @@ auth:
|
||||
# LDAP / FreeIPA
|
||||
ldap:
|
||||
enabled: false
|
||||
url: "ldaps://ipa.kindred.internal"
|
||||
url: "ldaps://ipa.example.internal"
|
||||
base_dn: "dc=kindred,dc=internal"
|
||||
user_search_dn: "cn=users,cn=accounts,dc=kindred,dc=internal"
|
||||
# Optional service account for user search (omit for direct user bind)
|
||||
@@ -77,10 +77,10 @@ auth:
|
||||
# OIDC / Keycloak
|
||||
oidc:
|
||||
enabled: false
|
||||
issuer_url: "https://keycloak.kindred.internal/realms/silo"
|
||||
issuer_url: "https://keycloak.example.internal/realms/silo"
|
||||
client_id: "silo"
|
||||
client_secret: "" # Use SILO_OIDC_CLIENT_SECRET env var
|
||||
redirect_url: "https://silo.kindred.internal/auth/callback"
|
||||
redirect_url: "https://silo.example.internal/auth/callback"
|
||||
scopes: ["openid", "profile", "email"]
|
||||
# Map Keycloak realm roles to Silo roles
|
||||
admin_role: "silo-admin"
|
||||
@@ -90,4 +90,4 @@ auth:
|
||||
# CORS origins (locked down when auth is enabled)
|
||||
cors:
|
||||
allowed_origins:
|
||||
- "https://silo.kindred.internal"
|
||||
- "https://silo.example.internal"
|
||||
|
||||
35
deployments/config.dev.yaml
Normal file
35
deployments/config.dev.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
# Silo Development Configuration
|
||||
# Used by deployments/docker-compose.yaml — works with zero setup via `make docker-up`.
|
||||
# For production Docker installs, run scripts/setup-docker.sh instead.
|
||||
|
||||
server:
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
base_url: "http://localhost:8080"
|
||||
|
||||
database:
|
||||
host: "postgres"
|
||||
port: 5432
|
||||
name: "silo"
|
||||
user: "silo"
|
||||
password: "${POSTGRES_PASSWORD:-silodev}"
|
||||
sslmode: "disable"
|
||||
max_connections: 10
|
||||
|
||||
storage:
|
||||
endpoint: "minio:9000"
|
||||
access_key: "${MINIO_ACCESS_KEY:-silominio}"
|
||||
secret_key: "${MINIO_SECRET_KEY:-silominiosecret}"
|
||||
bucket: "silo-files"
|
||||
use_ssl: false
|
||||
region: "us-east-1"
|
||||
|
||||
schemas:
|
||||
directory: "/etc/silo/schemas"
|
||||
default: "kindred-rd"
|
||||
|
||||
freecad:
|
||||
uri_scheme: "silo"
|
||||
|
||||
auth:
|
||||
enabled: false
|
||||
@@ -1,7 +1,7 @@
|
||||
# Silo Production Configuration
|
||||
# Single-binary deployment: silod serves API + React SPA
|
||||
#
|
||||
# Layout on silo.kindred.internal:
|
||||
# Layout on silo.example.internal:
|
||||
# /opt/silo/bin/silod - server binary
|
||||
# /opt/silo/web/dist/ - built React frontend (served automatically)
|
||||
# /opt/silo/schemas/ - part number schemas
|
||||
@@ -18,10 +18,10 @@
|
||||
server:
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
base_url: "https://silo.kindred.internal"
|
||||
base_url: "https://silo.example.internal"
|
||||
|
||||
database:
|
||||
host: "psql.kindred.internal"
|
||||
host: "psql.example.internal"
|
||||
port: 5432
|
||||
name: "silo"
|
||||
user: "silo"
|
||||
@@ -30,7 +30,7 @@ database:
|
||||
max_connections: 20
|
||||
|
||||
storage:
|
||||
endpoint: "minio.kindred.internal:9000"
|
||||
endpoint: "minio.example.internal:9000"
|
||||
access_key: "" # Set via SILO_MINIO_ACCESS_KEY
|
||||
secret_key: "" # Set via SILO_MINIO_SECRET_KEY
|
||||
bucket: "silo-files"
|
||||
@@ -53,7 +53,7 @@ auth:
|
||||
default_admin_password: "" # Set via SILO_ADMIN_PASSWORD
|
||||
ldap:
|
||||
enabled: true
|
||||
url: "ldaps://ipa.kindred.internal"
|
||||
url: "ldaps://ipa.example.internal"
|
||||
base_dn: "dc=kindred,dc=internal"
|
||||
user_search_dn: "cn=users,cn=accounts,dc=kindred,dc=internal"
|
||||
user_attr: "uid"
|
||||
@@ -73,4 +73,4 @@ auth:
|
||||
enabled: false
|
||||
cors:
|
||||
allowed_origins:
|
||||
- "https://silo.kindred.internal"
|
||||
- "https://silo.example.internal"
|
||||
|
||||
172
deployments/docker-compose.allinone.yaml
Normal file
172
deployments/docker-compose.allinone.yaml
Normal file
@@ -0,0 +1,172 @@
|
||||
# Silo All-in-One Stack
|
||||
# PostgreSQL + MinIO + OpenLDAP + Silo API + Nginx (optional)
|
||||
#
|
||||
# Quick start:
|
||||
# ./scripts/setup-docker.sh
|
||||
# docker compose -f deployments/docker-compose.allinone.yaml up -d
|
||||
#
|
||||
# With nginx reverse proxy:
|
||||
# docker compose -f deployments/docker-compose.allinone.yaml --profile nginx up -d
|
||||
#
|
||||
# View logs:
|
||||
# docker compose -f deployments/docker-compose.allinone.yaml logs -f
|
||||
#
|
||||
# Stop:
|
||||
# docker compose -f deployments/docker-compose.allinone.yaml down
|
||||
#
|
||||
# Stop and delete data:
|
||||
# docker compose -f deployments/docker-compose.allinone.yaml down -v
|
||||
|
||||
services:
|
||||
# ---------------------------------------------------------------------------
|
||||
# PostgreSQL 16
|
||||
# ---------------------------------------------------------------------------
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
container_name: silo-postgres
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: silo
|
||||
POSTGRES_USER: silo
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?Run ./scripts/setup-docker.sh first}
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
- ../migrations:/docker-entrypoint-initdb.d:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U silo -d silo"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- silo-net
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MinIO (S3-compatible object storage)
|
||||
# ---------------------------------------------------------------------------
|
||||
minio:
|
||||
image: minio/minio:latest
|
||||
container_name: silo-minio
|
||||
restart: unless-stopped
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: ${MINIO_ACCESS_KEY:?Run ./scripts/setup-docker.sh first}
|
||||
MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY:?Run ./scripts/setup-docker.sh first}
|
||||
volumes:
|
||||
- minio_data:/data
|
||||
ports:
|
||||
- "9001:9001" # MinIO console (remove in hardened setups)
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- silo-net
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# OpenLDAP (user directory for LDAP authentication)
|
||||
# ---------------------------------------------------------------------------
|
||||
openldap:
|
||||
image: bitnami/openldap:2.6
|
||||
container_name: silo-openldap
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
LDAP_ROOT: "dc=silo,dc=local"
|
||||
LDAP_ADMIN_USERNAME: "admin"
|
||||
LDAP_ADMIN_PASSWORD: ${LDAP_ADMIN_PASSWORD:?Run ./scripts/setup-docker.sh first}
|
||||
LDAP_USERS: ${LDAP_USERS:-siloadmin}
|
||||
LDAP_PASSWORDS: ${LDAP_PASSWORDS:?Run ./scripts/setup-docker.sh first}
|
||||
LDAP_GROUP: "silo-users"
|
||||
LDAP_USER_OU: "users"
|
||||
LDAP_GROUP_OU: "groups"
|
||||
volumes:
|
||||
- openldap_data:/bitnami/openldap
|
||||
- ./ldap:/docker-entrypoint-initdb.d:ro
|
||||
ports:
|
||||
- "1389:1389" # LDAP access for debugging (remove in hardened setups)
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "ldapsearch -x -H ldap://localhost:1389 -b dc=silo,dc=local -D cn=admin,dc=silo,dc=local -w $${LDAP_ADMIN_PASSWORD} '(objectClass=organization)' >/dev/null 2>&1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- silo-net
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Silo API Server
|
||||
# ---------------------------------------------------------------------------
|
||||
silo:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: build/package/Dockerfile
|
||||
container_name: silo-api
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
openldap:
|
||||
condition: service_healthy
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
# These override values in config.docker.yaml via the Go config loader's
|
||||
# direct env var support (see internal/config/config.go).
|
||||
SILO_DB_HOST: postgres
|
||||
SILO_DB_NAME: silo
|
||||
SILO_DB_USER: silo
|
||||
SILO_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
SILO_MINIO_ENDPOINT: minio:9000
|
||||
SILO_MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY}
|
||||
SILO_MINIO_SECRET_KEY: ${MINIO_SECRET_KEY}
|
||||
ports:
|
||||
- "${SILO_PORT:-8080}:8080"
|
||||
volumes:
|
||||
- ../schemas:/etc/silo/schemas:ro
|
||||
- ./config.docker.yaml:/etc/silo/config.yaml:ro
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:8080/health"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 15s
|
||||
networks:
|
||||
- silo-net
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Nginx reverse proxy (optional — enable with --profile nginx)
|
||||
# ---------------------------------------------------------------------------
|
||||
nginx:
|
||||
image: nginx:alpine
|
||||
container_name: silo-nginx
|
||||
restart: unless-stopped
|
||||
profiles:
|
||||
- nginx
|
||||
depends_on:
|
||||
silo:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf:ro
|
||||
# Uncomment to mount TLS certificates:
|
||||
# - /path/to/cert.pem:/etc/nginx/ssl/cert.pem:ro
|
||||
# - /path/to/key.pem:/etc/nginx/ssl/key.pem:ro
|
||||
networks:
|
||||
- silo-net
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
minio_data:
|
||||
openldap_data:
|
||||
|
||||
networks:
|
||||
silo-net:
|
||||
driver: bridge
|
||||
@@ -1,5 +1,5 @@
|
||||
# Production Docker Compose for Silo
|
||||
# Uses external PostgreSQL (psql.kindred.internal) and MinIO (minio.kindred.internal)
|
||||
# Uses external PostgreSQL (psql.example.internal) and MinIO (minio.example.internal)
|
||||
#
|
||||
# Usage:
|
||||
# export SILO_DB_PASSWORD=<your-password>
|
||||
@@ -15,23 +15,23 @@ services:
|
||||
container_name: silod
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
# Database connection (psql.kindred.internal)
|
||||
SILO_DB_HOST: psql.kindred.internal
|
||||
SILO_DB_PORT: 5432
|
||||
# Database connection (psql.example.internal)
|
||||
# Supported as direct env var overrides in the Go config loader:
|
||||
SILO_DB_HOST: psql.example.internal
|
||||
SILO_DB_NAME: silo
|
||||
SILO_DB_USER: silo
|
||||
SILO_DB_PASSWORD: ${SILO_DB_PASSWORD:?Database password required}
|
||||
SILO_DB_SSLMODE: require
|
||||
# Note: SILO_DB_PORT and SILO_DB_SSLMODE are NOT supported as direct
|
||||
# env var overrides. Set these in config.yaml instead, or use ${VAR}
|
||||
# syntax in the YAML file. See docs/CONFIGURATION.md for details.
|
||||
|
||||
# MinIO storage (minio.kindred.internal)
|
||||
SILO_MINIO_ENDPOINT: minio.kindred.internal:9000
|
||||
# MinIO storage (minio.example.internal)
|
||||
# Supported as direct env var overrides:
|
||||
SILO_MINIO_ENDPOINT: minio.example.internal:9000
|
||||
SILO_MINIO_ACCESS_KEY: ${SILO_MINIO_ACCESS_KEY:?MinIO access key required}
|
||||
SILO_MINIO_SECRET_KEY: ${SILO_MINIO_SECRET_KEY:?MinIO secret key required}
|
||||
SILO_MINIO_BUCKET: silo-files
|
||||
SILO_MINIO_USE_SSL: "true"
|
||||
|
||||
# Server settings
|
||||
SILO_SERVER_BASE_URL: ${SILO_BASE_URL:-http://silo.kindred.internal:8080}
|
||||
# Note: SILO_MINIO_BUCKET and SILO_MINIO_USE_SSL are NOT supported as
|
||||
# direct env var overrides. Set these in config.yaml instead.
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
|
||||
@@ -69,7 +69,7 @@ services:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ../schemas:/etc/silo/schemas:ro
|
||||
- ../configs/config.yaml:/etc/silo/config.yaml:ro
|
||||
- ./config.dev.yaml:/etc/silo/config.yaml:ro
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:8080/health"]
|
||||
interval: 10s
|
||||
|
||||
36
deployments/ldap/memberof.ldif
Normal file
36
deployments/ldap/memberof.ldif
Normal file
@@ -0,0 +1,36 @@
|
||||
# Enable the memberOf overlay for OpenLDAP.
|
||||
# When a user is added to a groupOfNames, their entry automatically
|
||||
# gets a memberOf attribute pointing to the group DN.
|
||||
# This is required for Silo's LDAP role mapping.
|
||||
#
|
||||
# Loaded automatically by bitnami/openldap from /docker-entrypoint-initdb.d/
|
||||
|
||||
dn: cn=module{0},cn=config
|
||||
changetype: modify
|
||||
add: olcModuleLoad
|
||||
olcModuleLoad: memberof
|
||||
|
||||
dn: olcOverlay=memberof,olcDatabase={2}mdb,cn=config
|
||||
changetype: add
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: memberof
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfDangling: ignore
|
||||
olcMemberOfGroupOC: groupOfNames
|
||||
olcMemberOfMemberAD: member
|
||||
olcMemberOfMemberOfAD: memberOf
|
||||
|
||||
# Enable refint overlay to maintain referential integrity
|
||||
# (removes memberOf when a user is removed from a group)
|
||||
dn: cn=module{0},cn=config
|
||||
changetype: modify
|
||||
add: olcModuleLoad
|
||||
olcModuleLoad: refint
|
||||
|
||||
dn: olcOverlay=refint,olcDatabase={2}mdb,cn=config
|
||||
changetype: add
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcRefintConfig
|
||||
olcOverlay: refint
|
||||
olcRefintAttribute: memberOf member
|
||||
34
deployments/ldap/silo-groups.ldif
Normal file
34
deployments/ldap/silo-groups.ldif
Normal file
@@ -0,0 +1,34 @@
|
||||
# Create Silo role groups for LDAP-based access control.
|
||||
# These groups map to Silo roles via auth.ldap.role_mapping in config.yaml.
|
||||
#
|
||||
# Group hierarchy:
|
||||
# silo-admins -> admin role (full access)
|
||||
# silo-users -> editor role (create/modify items)
|
||||
# silo-viewers -> viewer role (read-only)
|
||||
#
|
||||
# The initial LDAP user (set via LDAP_USERS env var) is added to silo-admins.
|
||||
# Additional users can be added with ldapadd or ldapmodify.
|
||||
#
|
||||
# Loaded automatically by bitnami/openldap from /docker-entrypoint-initdb.d/
|
||||
# Note: This runs after the default tree is created (users/groups OUs exist).
|
||||
|
||||
# Admin group — initial user is a member
|
||||
dn: cn=silo-admins,ou=groups,dc=silo,dc=local
|
||||
objectClass: groupOfNames
|
||||
cn: silo-admins
|
||||
description: Silo administrators (full access)
|
||||
member: cn=siloadmin,ou=users,dc=silo,dc=local
|
||||
|
||||
# Editor group
|
||||
dn: cn=silo-users,ou=groups,dc=silo,dc=local
|
||||
objectClass: groupOfNames
|
||||
cn: silo-users
|
||||
description: Silo editors (create and modify items)
|
||||
member: cn=placeholder,ou=users,dc=silo,dc=local
|
||||
|
||||
# Viewer group
|
||||
dn: cn=silo-viewers,ou=groups,dc=silo,dc=local
|
||||
objectClass: groupOfNames
|
||||
cn: silo-viewers
|
||||
description: Silo viewers (read-only access)
|
||||
member: cn=placeholder,ou=users,dc=silo,dc=local
|
||||
44
deployments/nginx/nginx-nossl.conf
Normal file
44
deployments/nginx/nginx-nossl.conf
Normal file
@@ -0,0 +1,44 @@
|
||||
# Silo Nginx Reverse Proxy — HTTP only (no TLS)
|
||||
#
|
||||
# Use this when TLS is terminated by an external load balancer or when
|
||||
# running on a trusted internal network without HTTPS.
|
||||
|
||||
upstream silo_backend {
|
||||
server silo:8080;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name _;
|
||||
|
||||
location / {
|
||||
proxy_pass http://silo_backend;
|
||||
proxy_http_version 1.1;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
|
||||
# SSE support
|
||||
proxy_set_header Connection "";
|
||||
proxy_buffering off;
|
||||
|
||||
# Timeouts
|
||||
proxy_connect_timeout 60s;
|
||||
proxy_send_timeout 60s;
|
||||
proxy_read_timeout 300s;
|
||||
|
||||
# File uploads (CAD files can be large)
|
||||
client_max_body_size 100M;
|
||||
}
|
||||
|
||||
location /nginx-health {
|
||||
access_log off;
|
||||
return 200 "OK\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
}
|
||||
103
deployments/nginx/nginx.conf
Normal file
103
deployments/nginx/nginx.conf
Normal file
@@ -0,0 +1,103 @@
|
||||
# Silo Nginx Reverse Proxy (Docker)
|
||||
#
|
||||
# HTTP reverse proxy with optional HTTPS. To enable TLS:
|
||||
# 1. Uncomment the ssl server block below
|
||||
# 2. Mount your certificate and key in docker-compose:
|
||||
# volumes:
|
||||
# - /path/to/cert.pem:/etc/nginx/ssl/cert.pem:ro
|
||||
# - /path/to/key.pem:/etc/nginx/ssl/key.pem:ro
|
||||
# 3. Uncomment the HTTP-to-HTTPS redirect in the port 80 block
|
||||
|
||||
upstream silo_backend {
|
||||
server silo:8080;
|
||||
}
|
||||
|
||||
# HTTP server
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name _;
|
||||
|
||||
# Uncomment the next line to redirect all HTTP traffic to HTTPS
|
||||
# return 301 https://$host$request_uri;
|
||||
|
||||
location / {
|
||||
proxy_pass http://silo_backend;
|
||||
proxy_http_version 1.1;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
|
||||
# SSE support
|
||||
proxy_set_header Connection "";
|
||||
proxy_buffering off;
|
||||
|
||||
# Timeouts
|
||||
proxy_connect_timeout 60s;
|
||||
proxy_send_timeout 60s;
|
||||
proxy_read_timeout 300s;
|
||||
|
||||
# File uploads (CAD files can be large)
|
||||
client_max_body_size 100M;
|
||||
}
|
||||
|
||||
# Health check endpoint for monitoring
|
||||
location /nginx-health {
|
||||
access_log off;
|
||||
return 200 "OK\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
}
|
||||
|
||||
# Uncomment for HTTPS (mount certs in docker-compose volumes)
|
||||
# server {
|
||||
# listen 443 ssl http2;
|
||||
# listen [::]:443 ssl http2;
|
||||
# server_name _;
|
||||
#
|
||||
# ssl_certificate /etc/nginx/ssl/cert.pem;
|
||||
# ssl_certificate_key /etc/nginx/ssl/key.pem;
|
||||
#
|
||||
# ssl_protocols TLSv1.2 TLSv1.3;
|
||||
# ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305;
|
||||
# ssl_prefer_server_ciphers off;
|
||||
# ssl_session_timeout 1d;
|
||||
# ssl_session_cache shared:SSL:10m;
|
||||
# ssl_session_tickets off;
|
||||
#
|
||||
# # Security headers
|
||||
# add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
# add_header X-Content-Type-Options "nosniff" always;
|
||||
# add_header Referrer-Policy "strict-origin-when-cross-origin" always;
|
||||
#
|
||||
# location / {
|
||||
# proxy_pass http://silo_backend;
|
||||
# proxy_http_version 1.1;
|
||||
#
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# proxy_set_header X-Forwarded-Proto $scheme;
|
||||
# proxy_set_header X-Forwarded-Host $host;
|
||||
# proxy_set_header X-Forwarded-Port $server_port;
|
||||
#
|
||||
# proxy_set_header Connection "";
|
||||
# proxy_buffering off;
|
||||
#
|
||||
# proxy_connect_timeout 60s;
|
||||
# proxy_send_timeout 60s;
|
||||
# proxy_read_timeout 300s;
|
||||
#
|
||||
# client_max_body_size 100M;
|
||||
# }
|
||||
#
|
||||
# location /nginx-health {
|
||||
# access_log off;
|
||||
# return 200 "OK\n";
|
||||
# add_header Content-Type text/plain;
|
||||
# }
|
||||
# }
|
||||
@@ -2,11 +2,11 @@
|
||||
# Copy to /etc/silo/silod.env and fill in values
|
||||
# Permissions: chmod 600 /etc/silo/silod.env
|
||||
|
||||
# Database credentials (psql.kindred.internal)
|
||||
# Database credentials (psql.example.internal)
|
||||
# Database: silo, User: silo
|
||||
SILO_DB_PASSWORD=
|
||||
|
||||
# MinIO credentials (minio.kindred.internal)
|
||||
# MinIO credentials (minio.example.internal)
|
||||
# User: silouser
|
||||
SILO_MINIO_ACCESS_KEY=silouser
|
||||
SILO_MINIO_SECRET_KEY=
|
||||
@@ -23,4 +23,4 @@ SILO_ADMIN_PASSWORD=
|
||||
# SILO_LDAP_BIND_PASSWORD=
|
||||
|
||||
# Optional: Override server base URL
|
||||
# SILO_SERVER_BASE_URL=http://silo.kindred.internal:8080
|
||||
# SILO_SERVER_BASE_URL=http://silo.example.internal:8080
|
||||
|
||||
@@ -38,7 +38,7 @@ API tokens allow the FreeCAD plugin, scripts, and CI pipelines to authenticate w
|
||||
### Creating a Token (CLI)
|
||||
|
||||
```sh
|
||||
export SILO_API_URL=https://silo.kindred.internal
|
||||
export SILO_API_URL=https://silo.example.internal
|
||||
export SILO_API_TOKEN=silo_<your-existing-token>
|
||||
|
||||
silo token create --name "CI pipeline"
|
||||
@@ -140,7 +140,7 @@ auth:
|
||||
|
||||
ldap:
|
||||
enabled: true
|
||||
url: "ldaps://ipa.kindred.internal"
|
||||
url: "ldaps://ipa.example.internal"
|
||||
base_dn: "dc=kindred,dc=internal"
|
||||
user_search_dn: "cn=users,cn=accounts,dc=kindred,dc=internal"
|
||||
user_attr: "uid"
|
||||
@@ -170,10 +170,10 @@ auth:
|
||||
|
||||
oidc:
|
||||
enabled: true
|
||||
issuer_url: "https://keycloak.kindred.internal/realms/silo"
|
||||
issuer_url: "https://keycloak.example.internal/realms/silo"
|
||||
client_id: "silo"
|
||||
client_secret: "" # Set via SILO_OIDC_CLIENT_SECRET
|
||||
redirect_url: "https://silo.kindred.internal/auth/callback"
|
||||
redirect_url: "https://silo.example.internal/auth/callback"
|
||||
scopes: ["openid", "profile", "email"]
|
||||
admin_role: "silo-admin"
|
||||
editor_role: "silo-editor"
|
||||
@@ -186,7 +186,7 @@ auth:
|
||||
auth:
|
||||
cors:
|
||||
allowed_origins:
|
||||
- "https://silo.kindred.internal"
|
||||
- "https://silo.example.internal"
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
@@ -254,4 +254,4 @@ UPDATE users SET password_hash = '<bcrypt-hash>', is_active = true WHERE usernam
|
||||
|
||||
- Verify the token is set in FreeCAD preferences or `SILO_API_TOKEN`
|
||||
- Check the API URL points to the correct server
|
||||
- Test with curl: `curl -H "Authorization: Bearer silo_..." https://silo.kindred.internal/api/items`
|
||||
- Test with curl: `curl -H "Authorization: Bearer silo_..." https://silo.example.internal/api/items`
|
||||
|
||||
246
docs/DAG.md
Normal file
246
docs/DAG.md
Normal file
@@ -0,0 +1,246 @@
|
||||
# Dependency DAG Specification
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
The Dependency DAG is a server-side graph that tracks how features, constraints, and assembly relationships depend on each other. It enables three capabilities described in [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md):
|
||||
|
||||
1. **Interference detection** -- comparing dependency cones of concurrent edit sessions to classify conflicts as none, soft, or hard before the user encounters them.
|
||||
2. **Incremental validation** -- marking changed nodes dirty and propagating only through the affected subgraph, using input-hash memoization to stop early when inputs haven't changed.
|
||||
3. **Structured merge safety** -- walking the DAG to determine whether concurrent edits share upstream dependencies, deciding if auto-merge is safe or manual review is required.
|
||||
|
||||
---
|
||||
|
||||
## 2. Two-Tier Model
|
||||
|
||||
Silo maintains two levels of dependency graph:
|
||||
|
||||
### 2.1 BOM DAG (existing)
|
||||
|
||||
The assembly-to-part relationship graph already stored in the `relationships` table. Each row represents a parent item containing a child item with a quantity and relationship type (`component`, `alternate`, `reference`). This graph is queried via `GetBOM`, `GetExpandedBOM`, `GetWhereUsed`, and `HasCycle` in `internal/db/relationships.go`.
|
||||
|
||||
The BOM DAG is **not modified** by this specification. It continues to serve its existing purpose.
|
||||
|
||||
### 2.2 Feature DAG (new)
|
||||
|
||||
A finer-grained graph stored in `dag_nodes` and `dag_edges` tables. Each node represents a feature within a single item's revision -- a sketch, pad, fillet, pocket, constraint, body, or part-level container. Edges represent "depends on" relationships: if Pad003 depends on Sketch001, an edge runs from Sketch001 to Pad003.
|
||||
|
||||
The feature DAG is populated by clients (silo-mod) when users save, or by runners after compute jobs. Silo stores and queries it but does not generate it -- the Create client has access to the feature tree and is the authoritative source.
|
||||
|
||||
### 2.3 Cross-Item Edges
|
||||
|
||||
Assembly constraints often reference geometry on child parts (e.g., "mate Face6 of PartA to Face2 of PartB"). These cross-item dependencies are stored in `dag_cross_edges`, linking a node in one item to a node in another. Each cross-edge optionally references the `relationships` row that establishes the BOM connection.
|
||||
|
||||
---
|
||||
|
||||
## 3. Data Model
|
||||
|
||||
### 3.1 dag_nodes
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `item_id` | UUID | FK to `items.id` |
|
||||
| `revision_number` | INTEGER | Revision this DAG snapshot belongs to |
|
||||
| `node_key` | TEXT | Feature name from Create (e.g., `Sketch001`, `Pad003`, `Body`) |
|
||||
| `node_type` | TEXT | One of: `sketch`, `pad`, `pocket`, `fillet`, `chamfer`, `constraint`, `body`, `part`, `datum`, `mirror`, `pattern`, `boolean` |
|
||||
| `properties_hash` | TEXT | SHA-256 of the node's parametric inputs (sketch coordinates, fillet radius, constraint values). Used for memoization -- if the hash hasn't changed, validation can skip this node. |
|
||||
| `validation_state` | TEXT | One of: `clean`, `dirty`, `validating`, `failed` |
|
||||
| `validation_msg` | TEXT | Error message when `validation_state = 'failed'` |
|
||||
| `metadata` | JSONB | Type-specific data (sketch coords, feature params, constraint definitions) |
|
||||
| `created_at` | TIMESTAMPTZ | Row creation time |
|
||||
| `updated_at` | TIMESTAMPTZ | Last state change |
|
||||
|
||||
**Uniqueness:** `(item_id, revision_number, node_key)` -- one node per feature per revision.
|
||||
|
||||
### 3.2 dag_edges
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `source_node_id` | UUID | FK to `dag_nodes.id` -- the upstream node |
|
||||
| `target_node_id` | UUID | FK to `dag_nodes.id` -- the downstream node that depends on source |
|
||||
| `edge_type` | TEXT | `depends_on` (default), `references`, `constrains` |
|
||||
| `metadata` | JSONB | Optional edge metadata |
|
||||
|
||||
**Direction convention:** An edge from A to B means "B depends on A". A is upstream, B is downstream. Forward-cone traversal from A walks edges where A is the source.
|
||||
|
||||
**Uniqueness:** `(source_node_id, target_node_id, edge_type)`.
|
||||
|
||||
**Constraint:** `source_node_id != target_node_id` (no self-edges).
|
||||
|
||||
### 3.3 dag_cross_edges
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `id` | UUID | Primary key |
|
||||
| `source_node_id` | UUID | FK to `dag_nodes.id` -- node in item A |
|
||||
| `target_node_id` | UUID | FK to `dag_nodes.id` -- node in item B |
|
||||
| `relationship_id` | UUID | FK to `relationships.id` (nullable) -- the BOM entry connecting the two items |
|
||||
| `edge_type` | TEXT | `assembly_ref` (default) |
|
||||
| `metadata` | JSONB | Reference details (face ID, edge ID, etc.) |
|
||||
|
||||
**Uniqueness:** `(source_node_id, target_node_id)`.
|
||||
|
||||
---
|
||||
|
||||
## 4. Validation States
|
||||
|
||||
Each node has a `validation_state` that tracks whether its computed geometry is current:
|
||||
|
||||
| State | Meaning |
|
||||
|-------|---------|
|
||||
| `clean` | Node's geometry matches its `properties_hash`. No recompute needed. |
|
||||
| `dirty` | An upstream change has propagated to this node. Recompute required. |
|
||||
| `validating` | A compute job is currently revalidating this node. |
|
||||
| `failed` | Recompute failed. `validation_msg` contains the error. |
|
||||
|
||||
### 4.1 State Transitions
|
||||
|
||||
```
|
||||
clean → dirty (upstream change detected, or MarkDirty called)
|
||||
dirty → validating (compute job claims this node)
|
||||
validating → clean (recompute succeeded, properties_hash updated)
|
||||
validating → failed (recompute produced an error)
|
||||
failed → dirty (upstream change detected, retry possible)
|
||||
dirty → clean (properties_hash matches previous -- memoization shortcut)
|
||||
```
|
||||
|
||||
### 4.2 Dirty Propagation
|
||||
|
||||
When a node is marked dirty, all downstream nodes in its forward cone are also marked dirty. This is done atomically in a single recursive CTE:
|
||||
|
||||
```sql
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT $1::uuid AS node_id
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
UPDATE dag_nodes SET validation_state = 'dirty', updated_at = now()
|
||||
WHERE id IN (SELECT node_id FROM forward_cone)
|
||||
AND validation_state = 'clean';
|
||||
```
|
||||
|
||||
### 4.3 Memoization
|
||||
|
||||
Before marking a node dirty, the system can compare the new `properties_hash` against the stored value. If they match, the change did not affect this node's inputs, and propagation stops. This is the memoization boundary described in MULTI_USER_EDITS.md Section 5.2.
|
||||
|
||||
---
|
||||
|
||||
## 5. Graph Queries
|
||||
|
||||
### 5.1 Forward Cone
|
||||
|
||||
Returns all nodes downstream of a given node -- everything that would be affected if the source node changes. Used for interference detection: if two users' forward cones overlap, there is potential interference.
|
||||
|
||||
```sql
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT target_node_id AS node_id
|
||||
FROM dag_edges WHERE source_node_id = $1
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
SELECT n.* FROM dag_nodes n JOIN forward_cone fc ON n.id = fc.node_id;
|
||||
```
|
||||
|
||||
### 5.2 Backward Cone
|
||||
|
||||
Returns all nodes upstream of a given node -- everything the target node depends on.
|
||||
|
||||
### 5.3 Dirty Subgraph
|
||||
|
||||
Returns all nodes for a given item where `validation_state != 'clean'`, along with their edges. This is the input to an incremental validation job.
|
||||
|
||||
### 5.4 Cycle Detection
|
||||
|
||||
Before adding an edge, check that it would not create a cycle. Uses the same recursive ancestor-walk pattern as `HasCycle` in `internal/db/relationships.go`.
|
||||
|
||||
---
|
||||
|
||||
## 6. DAG Sync
|
||||
|
||||
Clients push the full feature DAG to Silo via `PUT /api/items/{partNumber}/dag`. The sync payload is a JSON document:
|
||||
|
||||
```json
|
||||
{
|
||||
"revision": 3,
|
||||
"nodes": [
|
||||
{
|
||||
"key": "Sketch001",
|
||||
"type": "sketch",
|
||||
"properties_hash": "a1b2c3...",
|
||||
"metadata": {
|
||||
"coordinates": [[0, 0], [10, 0], [10, 5]],
|
||||
"constraints": ["horizontal", "vertical"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "Pad003",
|
||||
"type": "pad",
|
||||
"properties_hash": "d4e5f6...",
|
||||
"metadata": {
|
||||
"length": 15.0,
|
||||
"direction": [0, 0, 1]
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"source": "Sketch001",
|
||||
"target": "Pad003",
|
||||
"type": "depends_on"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The server processes this within a single transaction:
|
||||
1. Upsert all nodes (matched by `item_id + revision_number + node_key`).
|
||||
2. Replace all edges for this item/revision.
|
||||
3. Compare new `properties_hash` values against stored values to detect changes.
|
||||
4. Mark changed nodes and their forward cones dirty.
|
||||
5. Publish `dag.updated` SSE event.
|
||||
|
||||
---
|
||||
|
||||
## 7. Interference Detection
|
||||
|
||||
When a user registers an edit context (MULTI_USER_EDITS.md Section 3.1), the server:
|
||||
|
||||
1. Looks up the node(s) being edited by `node_key` within the item's current revision.
|
||||
2. Computes the forward cone for those nodes.
|
||||
3. Compares the cone against all active edit sessions' cones.
|
||||
4. Classifies interference:
|
||||
- **No overlap** → no interference, fully concurrent.
|
||||
- **Overlap, different objects** → soft interference, visual indicator via SSE.
|
||||
- **Same object, same edit type** → hard interference, edit blocked.
|
||||
|
||||
---
|
||||
|
||||
## 8. REST API
|
||||
|
||||
All endpoints are under `/api/items/{partNumber}` and require authentication.
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/dag` | viewer | Get full feature DAG for current revision |
|
||||
| `GET` | `/dag/forward-cone/{nodeKey}` | viewer | Get forward dependency cone |
|
||||
| `GET` | `/dag/dirty` | viewer | Get dirty subgraph |
|
||||
| `PUT` | `/dag` | editor | Sync full feature tree (from client or runner) |
|
||||
| `POST` | `/dag/mark-dirty/{nodeKey}` | editor | Manually mark a node and its cone dirty |
|
||||
|
||||
---
|
||||
|
||||
## 9. References
|
||||
|
||||
- [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md) -- Full multi-user editing specification
|
||||
- [WORKERS.md](WORKERS.md) -- Worker/runner system that executes validation jobs
|
||||
- [ROADMAP.md](ROADMAP.md) -- Tier 0 Dependency DAG entry
|
||||
395
docs/DAG_CLIENT_INTEGRATION.md
Normal file
395
docs/DAG_CLIENT_INTEGRATION.md
Normal file
@@ -0,0 +1,395 @@
|
||||
# DAG Client Integration Contract
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
This document describes what silo-mod and Headless Create runners need to implement to integrate with the Silo dependency DAG and worker system.
|
||||
|
||||
---
|
||||
|
||||
## 1. Overview
|
||||
|
||||
The DAG system has two client-side integration points:
|
||||
|
||||
1. **silo-mod workbench** (desktop) -- pushes DAG data to Silo on save or revision create.
|
||||
2. **silorunner + silo-mod** (headless) -- extracts DAGs, validates features, and exports geometry as compute jobs.
|
||||
|
||||
Both share the same Python codebase in the silo-mod repository. Desktop users call the code interactively; runners call it headlessly via `create --console`.
|
||||
|
||||
---
|
||||
|
||||
## 2. DAG Sync Payload
|
||||
|
||||
Clients push feature trees to Silo via:
|
||||
|
||||
```
|
||||
PUT /api/items/{partNumber}/dag
|
||||
Authorization: Bearer <user_token or runner_token>
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
### 2.1 Request Body
|
||||
|
||||
```json
|
||||
{
|
||||
"revision_number": 3,
|
||||
"nodes": [
|
||||
{
|
||||
"node_key": "Sketch001",
|
||||
"node_type": "sketch",
|
||||
"properties_hash": "a1b2c3d4e5f6...",
|
||||
"metadata": {
|
||||
"label": "Base Profile",
|
||||
"constraint_count": 12
|
||||
}
|
||||
},
|
||||
{
|
||||
"node_key": "Pad001",
|
||||
"node_type": "pad",
|
||||
"properties_hash": "f6e5d4c3b2a1...",
|
||||
"metadata": {
|
||||
"label": "Main Extrusion",
|
||||
"length": 25.0
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"source_key": "Sketch001",
|
||||
"target_key": "Pad001",
|
||||
"edge_type": "depends_on"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 2.2 Field Reference
|
||||
|
||||
**Nodes:**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `node_key` | string | yes | Unique within item+revision. Use Create's internal object name (e.g. `Sketch001`, `Pad003`). |
|
||||
| `node_type` | string | yes | One of: `sketch`, `pad`, `pocket`, `fillet`, `chamfer`, `constraint`, `body`, `part`, `datum`. |
|
||||
| `properties_hash` | string | no | SHA-256 hex digest of the node's parametric inputs. Used for memoization. |
|
||||
| `validation_state` | string | no | One of: `clean`, `dirty`, `validating`, `failed`. Defaults to `clean`. |
|
||||
| `metadata` | object | no | Arbitrary key-value pairs for display or debugging. |
|
||||
|
||||
**Edges:**
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `source_key` | string | yes | The node that is depended upon. |
|
||||
| `target_key` | string | yes | The node that depends on the source. |
|
||||
| `edge_type` | string | no | One of: `depends_on` (default), `references`, `constrains`. |
|
||||
|
||||
**Direction convention:** Edges point from dependency to dependent. If Pad001 depends on Sketch001, the edge is `source_key: "Sketch001"`, `target_key: "Pad001"`.
|
||||
|
||||
### 2.3 Response
|
||||
|
||||
```json
|
||||
{
|
||||
"synced": true,
|
||||
"node_count": 15,
|
||||
"edge_count": 14
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Computing properties_hash
|
||||
|
||||
The `properties_hash` enables memoization -- if a node's inputs haven't changed since the last validation, it can be skipped. Computing it:
|
||||
|
||||
```python
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
def compute_properties_hash(feature_obj):
|
||||
"""Hash the parametric inputs of a Create feature."""
|
||||
inputs = {}
|
||||
|
||||
if feature_obj.TypeId == "Sketcher::SketchObject":
|
||||
# Hash geometry + constraints
|
||||
inputs["geometry_count"] = feature_obj.GeometryCount
|
||||
inputs["constraint_count"] = feature_obj.ConstraintCount
|
||||
inputs["geometry"] = str(feature_obj.Shape.exportBrep())
|
||||
elif feature_obj.TypeId == "PartDesign::Pad":
|
||||
inputs["length"] = feature_obj.Length.Value
|
||||
inputs["type"] = str(feature_obj.Type)
|
||||
inputs["reversed"] = feature_obj.Reversed
|
||||
inputs["sketch"] = feature_obj.Profile[0].Name
|
||||
# ... other feature types
|
||||
|
||||
canonical = json.dumps(inputs, sort_keys=True)
|
||||
return hashlib.sha256(canonical.encode()).hexdigest()
|
||||
```
|
||||
|
||||
The exact inputs per feature type are determined by what parametric values affect the feature's geometry. Include anything that, if changed, would require recomputation.
|
||||
|
||||
---
|
||||
|
||||
## 4. Feature Tree Walking
|
||||
|
||||
To extract the DAG from a Create document:
|
||||
|
||||
```python
|
||||
import FreeCAD
|
||||
|
||||
def extract_dag(doc):
|
||||
"""Walk a Create document and return nodes + edges."""
|
||||
nodes = []
|
||||
edges = []
|
||||
|
||||
for obj in doc.Objects:
|
||||
# Skip non-feature objects
|
||||
if not hasattr(obj, "TypeId"):
|
||||
continue
|
||||
|
||||
node_type = classify_type(obj.TypeId)
|
||||
if node_type is None:
|
||||
continue
|
||||
|
||||
nodes.append({
|
||||
"node_key": obj.Name,
|
||||
"node_type": node_type,
|
||||
"properties_hash": compute_properties_hash(obj),
|
||||
"metadata": {
|
||||
"label": obj.Label,
|
||||
"type_id": obj.TypeId,
|
||||
}
|
||||
})
|
||||
|
||||
# Walk dependencies via InList (objects this one depends on)
|
||||
for dep in obj.InList:
|
||||
if hasattr(dep, "TypeId") and classify_type(dep.TypeId):
|
||||
edges.append({
|
||||
"source_key": dep.Name,
|
||||
"target_key": obj.Name,
|
||||
"edge_type": "depends_on",
|
||||
})
|
||||
|
||||
return nodes, edges
|
||||
|
||||
|
||||
def classify_type(type_id):
|
||||
"""Map Create TypeIds to DAG node types."""
|
||||
mapping = {
|
||||
"Sketcher::SketchObject": "sketch",
|
||||
"PartDesign::Pad": "pad",
|
||||
"PartDesign::Pocket": "pocket",
|
||||
"PartDesign::Fillet": "fillet",
|
||||
"PartDesign::Chamfer": "chamfer",
|
||||
"PartDesign::Body": "body",
|
||||
"Part::Feature": "part",
|
||||
"Sketcher::SketchConstraint": "constraint",
|
||||
}
|
||||
return mapping.get(type_id)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. When to Push DAG Data
|
||||
|
||||
Push the DAG to Silo in these scenarios:
|
||||
|
||||
| Event | Trigger | Who |
|
||||
|-------|---------|-----|
|
||||
| User saves in silo-mod | On save callback | Desktop silo-mod workbench |
|
||||
| User creates a revision | After `POST /api/items/{pn}/revisions` succeeds | Desktop silo-mod workbench |
|
||||
| Runner extracts DAG | After `create-dag-extract` job completes | silorunner via `PUT /api/runner/jobs/{id}/dag` |
|
||||
| Runner validates | After `create-validate` job, push updated validation states | silorunner via `PUT /api/runner/jobs/{id}/dag` |
|
||||
|
||||
---
|
||||
|
||||
## 6. Runner Entry Points
|
||||
|
||||
silo-mod must provide these Python entry points for headless invocation:
|
||||
|
||||
### 6.1 silo.runner.dag_extract
|
||||
|
||||
Extracts the feature DAG from a Create file and writes it as JSON.
|
||||
|
||||
```python
|
||||
# silo/runner.py
|
||||
|
||||
def dag_extract(input_path, output_path):
|
||||
"""
|
||||
Extract feature DAG from a Create file.
|
||||
|
||||
Args:
|
||||
input_path: Path to the .kc (Kindred Create) file.
|
||||
output_path: Path to write the JSON output.
|
||||
|
||||
Output JSON format:
|
||||
{
|
||||
"nodes": [...], // Same format as DAG sync payload
|
||||
"edges": [...]
|
||||
}
|
||||
"""
|
||||
doc = FreeCAD.openDocument(input_path)
|
||||
nodes, edges = extract_dag(doc)
|
||||
|
||||
with open(output_path, 'w') as f:
|
||||
json.dump({"nodes": nodes, "edges": edges}, f)
|
||||
|
||||
FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
### 6.2 silo.runner.validate
|
||||
|
||||
Rebuilds all features and reports pass/fail per node.
|
||||
|
||||
```python
|
||||
def validate(input_path, output_path):
|
||||
"""
|
||||
Validate a Create file by rebuilding all features.
|
||||
|
||||
Output JSON format:
|
||||
{
|
||||
"valid": true/false,
|
||||
"nodes": [
|
||||
{
|
||||
"node_key": "Pad001",
|
||||
"state": "clean", // or "failed"
|
||||
"message": null, // error message if failed
|
||||
"properties_hash": "..."
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
doc = FreeCAD.openDocument(input_path)
|
||||
doc.recompute()
|
||||
|
||||
results = []
|
||||
all_valid = True
|
||||
for obj in doc.Objects:
|
||||
if not hasattr(obj, "TypeId"):
|
||||
continue
|
||||
node_type = classify_type(obj.TypeId)
|
||||
if node_type is None:
|
||||
continue
|
||||
|
||||
state = "clean"
|
||||
message = None
|
||||
if hasattr(obj, "isValid") and not obj.isValid():
|
||||
state = "failed"
|
||||
message = f"Feature {obj.Label} failed to recompute"
|
||||
all_valid = False
|
||||
|
||||
results.append({
|
||||
"node_key": obj.Name,
|
||||
"state": state,
|
||||
"message": message,
|
||||
"properties_hash": compute_properties_hash(obj),
|
||||
})
|
||||
|
||||
with open(output_path, 'w') as f:
|
||||
json.dump({"valid": all_valid, "nodes": results}, f)
|
||||
|
||||
FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
### 6.3 silo.runner.export
|
||||
|
||||
Exports geometry to STEP, IGES, or other formats.
|
||||
|
||||
```python
|
||||
def export(input_path, output_path, format="step"):
|
||||
"""
|
||||
Export a Create file to an external format.
|
||||
|
||||
Args:
|
||||
input_path: Path to the .kc file.
|
||||
output_path: Path to write the exported file.
|
||||
format: Export format ("step", "iges", "stl", "obj").
|
||||
"""
|
||||
doc = FreeCAD.openDocument(input_path)
|
||||
|
||||
import Part
|
||||
shapes = [obj.Shape for obj in doc.Objects if hasattr(obj, "Shape")]
|
||||
compound = Part.makeCompound(shapes)
|
||||
|
||||
format_map = {
|
||||
"step": "STEP",
|
||||
"iges": "IGES",
|
||||
"stl": "STL",
|
||||
"obj": "OBJ",
|
||||
}
|
||||
|
||||
Part.export([compound], output_path)
|
||||
FreeCAD.closeDocument(doc.Name)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Headless Invocation
|
||||
|
||||
The `silorunner` binary shells out to Create (with silo-mod installed):
|
||||
|
||||
```bash
|
||||
# DAG extraction
|
||||
create --console -e "from silo.runner import dag_extract; dag_extract('/tmp/job/part.kc', '/tmp/job/dag.json')"
|
||||
|
||||
# Validation
|
||||
create --console -e "from silo.runner import validate; validate('/tmp/job/part.kc', '/tmp/job/result.json')"
|
||||
|
||||
# Export
|
||||
create --console -e "from silo.runner import export; export('/tmp/job/part.kc', '/tmp/job/output.step', 'step')"
|
||||
```
|
||||
|
||||
**Prerequisites:** The runner host must have:
|
||||
- Headless Create installed (Kindred's fork of FreeCAD)
|
||||
- silo-mod installed as a Create addon (so `from silo.runner import ...` works)
|
||||
- No display server required -- `--console` mode is headless
|
||||
|
||||
---
|
||||
|
||||
## 8. Validation Result Handling
|
||||
|
||||
After a runner completes a `create-validate` job, it should:
|
||||
|
||||
1. Read the result JSON.
|
||||
2. Push updated validation states via `PUT /api/runner/jobs/{jobID}/dag`:
|
||||
|
||||
```json
|
||||
{
|
||||
"revision_number": 3,
|
||||
"nodes": [
|
||||
{"node_key": "Sketch001", "node_type": "sketch", "validation_state": "clean", "properties_hash": "abc..."},
|
||||
{"node_key": "Pad001", "node_type": "pad", "validation_state": "failed", "properties_hash": "def..."}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "Sketch001", "target_key": "Pad001"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
3. Complete the job via `POST /api/runner/jobs/{jobID}/complete` with the summary result.
|
||||
|
||||
---
|
||||
|
||||
## 9. SSE Events
|
||||
|
||||
Clients should listen for these events on `GET /api/events`:
|
||||
|
||||
| Event | Payload | When |
|
||||
|-------|---------|------|
|
||||
| `dag.updated` | `{item_id, part_number, revision_number, node_count, edge_count}` | After any DAG sync |
|
||||
| `dag.validated` | `{item_id, part_number, valid, failed_count}` | After validation completes |
|
||||
| `job.created` | `{job_id, definition_name, trigger, item_id}` | Job auto-triggered or manually created |
|
||||
| `job.claimed` | `{job_id, runner_id, runner}` | Runner claims a job |
|
||||
| `job.progress` | `{job_id, progress, message}` | Runner reports progress |
|
||||
| `job.completed` | `{job_id, runner_id}` | Job finishes successfully |
|
||||
| `job.failed` | `{job_id, runner_id, error}` | Job fails |
|
||||
| `job.cancelled` | `{job_id, cancelled_by}` | Job cancelled by user |
|
||||
|
||||
---
|
||||
|
||||
## 10. Cross-Item Edges
|
||||
|
||||
For assembly constraints that reference geometry in child parts (e.g. a mate constraint between two parts), use the `dag_cross_edges` table. These edges bridge the BOM DAG and the feature DAG.
|
||||
|
||||
Cross-item edges are **not** included in the standard `PUT /dag` sync. They will be managed through a dedicated endpoint in a future iteration once the assembly constraint model in Create/silo-mod is finalized.
|
||||
|
||||
For now, the DAG sync covers intra-item dependencies only. Assembly-level interference detection uses the BOM DAG (`relationships` table) combined with per-item feature DAGs.
|
||||
@@ -1,5 +1,9 @@
|
||||
# Silo Production Deployment Guide
|
||||
|
||||
> **First-time setup?** See the [Installation Guide](INSTALL.md) for step-by-step
|
||||
> instructions. This document covers ongoing maintenance and operations for an
|
||||
> existing deployment.
|
||||
|
||||
This guide covers deploying Silo to a dedicated VM using external PostgreSQL and MinIO services.
|
||||
|
||||
## Table of Contents
|
||||
@@ -17,7 +21,7 @@ This guide covers deploying Silo to a dedicated VM using external PostgreSQL and
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ silo.kindred.internal │
|
||||
│ silo.example.internal │
|
||||
│ ┌───────────────────────────────────────────────────────────┐ │
|
||||
│ │ silod │ │
|
||||
│ │ (Silo API Server) │ │
|
||||
@@ -27,7 +31,7 @@ This guide covers deploying Silo to a dedicated VM using external PostgreSQL and
|
||||
│ │
|
||||
▼ ▼
|
||||
┌─────────────────────────┐ ┌─────────────────────────────────┐
|
||||
│ psql.kindred.internal │ │ minio.kindred.internal │
|
||||
│ psql.example.internal │ │ minio.example.internal │
|
||||
│ PostgreSQL 16 │ │ MinIO S3 │
|
||||
│ :5432 │ │ :9000 (API) │
|
||||
│ │ │ :9001 (Console) │
|
||||
@@ -40,8 +44,8 @@ The following external services are already configured:
|
||||
|
||||
| Service | Host | Database/Bucket | User |
|
||||
|---------|------|-----------------|------|
|
||||
| PostgreSQL | psql.kindred.internal:5432 | silo | silo |
|
||||
| MinIO | minio.kindred.internal:9000 | silo-files | silouser |
|
||||
| PostgreSQL | psql.example.internal:5432 | silo | silo |
|
||||
| MinIO | minio.example.internal:9000 | silo-files | silouser |
|
||||
|
||||
Migrations have been applied to the database.
|
||||
|
||||
@@ -53,10 +57,10 @@ For a fresh VM, run these commands:
|
||||
|
||||
```bash
|
||||
# 1. SSH to the target host
|
||||
ssh root@silo.kindred.internal
|
||||
ssh root@silo.example.internal
|
||||
|
||||
# 2. Download and run setup script
|
||||
curl -fsSL https://gitea.kindred.internal/kindred/silo-0062/raw/branch/main/scripts/setup-host.sh | bash
|
||||
curl -fsSL https://git.kindred-systems.com/kindred/silo/raw/branch/main/scripts/setup-host.sh | bash
|
||||
|
||||
# 3. Configure credentials
|
||||
nano /etc/silo/silod.env
|
||||
@@ -69,16 +73,16 @@ nano /etc/silo/silod.env
|
||||
|
||||
## Initial Setup
|
||||
|
||||
Run the setup script once on `silo.kindred.internal` to prepare the host:
|
||||
Run the setup script once on `silo.example.internal` to prepare the host:
|
||||
|
||||
```bash
|
||||
# Option 1: If you have the repo locally
|
||||
scp scripts/setup-host.sh root@silo.kindred.internal:/tmp/
|
||||
ssh root@silo.kindred.internal 'bash /tmp/setup-host.sh'
|
||||
scp scripts/setup-host.sh root@silo.example.internal:/tmp/
|
||||
ssh root@silo.example.internal 'bash /tmp/setup-host.sh'
|
||||
|
||||
# Option 2: Direct on the host
|
||||
ssh root@silo.kindred.internal
|
||||
curl -fsSL https://git.kindred.internal/kindred/silo/raw/branch/main/scripts/setup-host.sh -o /tmp/setup-host.sh
|
||||
ssh root@silo.example.internal
|
||||
curl -fsSL https://git.kindred-systems.com/kindred/silo/raw/branch/main/scripts/setup-host.sh -o /tmp/setup-host.sh
|
||||
bash /tmp/setup-host.sh
|
||||
```
|
||||
|
||||
@@ -100,10 +104,10 @@ sudo nano /etc/silo/silod.env
|
||||
Fill in the values:
|
||||
|
||||
```bash
|
||||
# Database credentials (psql.kindred.internal)
|
||||
# Database credentials (psql.example.internal)
|
||||
SILO_DB_PASSWORD=your-database-password
|
||||
|
||||
# MinIO credentials (minio.kindred.internal)
|
||||
# MinIO credentials (minio.example.internal)
|
||||
SILO_MINIO_ACCESS_KEY=silouser
|
||||
SILO_MINIO_SECRET_KEY=your-minio-secret-key
|
||||
```
|
||||
@@ -114,10 +118,10 @@ Before deploying, verify connectivity to external services:
|
||||
|
||||
```bash
|
||||
# Test PostgreSQL
|
||||
psql -h psql.kindred.internal -U silo -d silo -c 'SELECT 1'
|
||||
psql -h psql.example.internal -U silo -d silo -c 'SELECT 1'
|
||||
|
||||
# Test MinIO
|
||||
curl -I http://minio.kindred.internal:9000/minio/health/live
|
||||
curl -I http://minio.example.internal:9000/minio/health/live
|
||||
```
|
||||
|
||||
---
|
||||
@@ -129,7 +133,7 @@ curl -I http://minio.kindred.internal:9000/minio/health/live
|
||||
To deploy or update Silo, run the deploy script on the target host:
|
||||
|
||||
```bash
|
||||
ssh root@silo.kindred.internal
|
||||
ssh root@silo.example.internal
|
||||
/opt/silo/src/scripts/deploy.sh
|
||||
```
|
||||
|
||||
@@ -165,7 +169,7 @@ sudo /opt/silo/src/scripts/deploy.sh --status
|
||||
You can override the git repository URL and branch:
|
||||
|
||||
```bash
|
||||
export SILO_REPO_URL=https://git.kindred.internal/kindred/silo.git
|
||||
export SILO_REPO_URL=https://git.kindred-systems.com/kindred/silo.git
|
||||
export SILO_BRANCH=main
|
||||
sudo -E /opt/silo/src/scripts/deploy.sh
|
||||
```
|
||||
@@ -247,7 +251,7 @@ curl http://localhost:8080/ready
|
||||
To update to the latest version:
|
||||
|
||||
```bash
|
||||
ssh root@silo.kindred.internal
|
||||
ssh root@silo.example.internal
|
||||
/opt/silo/src/scripts/deploy.sh
|
||||
```
|
||||
|
||||
@@ -269,7 +273,7 @@ When new migrations are added, run them manually:
|
||||
ls -la /opt/silo/src/migrations/
|
||||
|
||||
# Run a specific migration
|
||||
psql -h psql.kindred.internal -U silo -d silo -f /opt/silo/src/migrations/008_new_feature.sql
|
||||
psql -h psql.example.internal -U silo -d silo -f /opt/silo/src/migrations/008_new_feature.sql
|
||||
```
|
||||
|
||||
---
|
||||
@@ -303,13 +307,13 @@ psql -h psql.kindred.internal -U silo -d silo -f /opt/silo/src/migrations/008_ne
|
||||
|
||||
1. Test network connectivity:
|
||||
```bash
|
||||
nc -zv psql.kindred.internal 5432
|
||||
nc -zv psql.example.internal 5432
|
||||
```
|
||||
|
||||
2. Test credentials:
|
||||
```bash
|
||||
source /etc/silo/silod.env
|
||||
PGPASSWORD=$SILO_DB_PASSWORD psql -h psql.kindred.internal -U silo -d silo -c 'SELECT 1'
|
||||
PGPASSWORD=$SILO_DB_PASSWORD psql -h psql.example.internal -U silo -d silo -c 'SELECT 1'
|
||||
```
|
||||
|
||||
3. Check `pg_hba.conf` on PostgreSQL server allows connections from this host.
|
||||
@@ -318,12 +322,12 @@ psql -h psql.kindred.internal -U silo -d silo -f /opt/silo/src/migrations/008_ne
|
||||
|
||||
1. Test network connectivity:
|
||||
```bash
|
||||
nc -zv minio.kindred.internal 9000
|
||||
nc -zv minio.example.internal 9000
|
||||
```
|
||||
|
||||
2. Test with curl:
|
||||
```bash
|
||||
curl -I http://minio.kindred.internal:9000/minio/health/live
|
||||
curl -I http://minio.example.internal:9000/minio/health/live
|
||||
```
|
||||
|
||||
3. Check SSL settings in config match MinIO setup:
|
||||
@@ -340,8 +344,8 @@ curl -v http://localhost:8080/health
|
||||
curl -v http://localhost:8080/ready
|
||||
|
||||
# If ready fails but health passes, check external services
|
||||
psql -h psql.kindred.internal -U silo -d silo -c 'SELECT 1'
|
||||
curl http://minio.kindred.internal:9000/minio/health/live
|
||||
psql -h psql.example.internal -U silo -d silo -c 'SELECT 1'
|
||||
curl http://minio.example.internal:9000/minio/health/live
|
||||
```
|
||||
|
||||
### Build Fails
|
||||
@@ -391,14 +395,14 @@ This script:
|
||||
getcert list
|
||||
```
|
||||
|
||||
2. The silo config is already updated to use `https://silo.kindred.internal` as base URL. Restart silo:
|
||||
2. The silo config is already updated to use `https://silo.example.internal` as the base URL. Restart silo:
|
||||
```bash
|
||||
sudo systemctl restart silod
|
||||
```
|
||||
|
||||
3. Test the setup:
|
||||
```bash
|
||||
curl https://silo.kindred.internal/health
|
||||
curl https://silo.example.internal/health
|
||||
```
|
||||
|
||||
### Certificate Management
|
||||
@@ -422,7 +426,7 @@ For clients to trust the Silo HTTPS certificate, they need the IPA CA:
|
||||
|
||||
```bash
|
||||
# Download CA cert
|
||||
curl -o /tmp/ipa-ca.crt https://ipa.kindred.internal/ipa/config/ca.crt
|
||||
curl -o /tmp/ipa-ca.crt https://ipa.example.internal/ipa/config/ca.crt
|
||||
|
||||
# Ubuntu/Debian
|
||||
sudo cp /tmp/ipa-ca.crt /usr/local/share/ca-certificates/ipa-ca.crt
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
# Silo Gap Analysis and Revision Control Roadmap
|
||||
# Silo Gap Analysis
|
||||
|
||||
**Date:** 2026-02-08
|
||||
**Date:** 2026-02-13
|
||||
**Status:** Analysis Complete (Updated)
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This document analyzes the current state of the Silo project against its specification, identifies documentation and feature gaps, and outlines a roadmap for enhanced revision control capabilities.
|
||||
This document analyzes the current state of the Silo project against its specification and against SOLIDWORKS PDM (the industry-leading product data management solution). It identifies documentation gaps, feature gaps, and outlines a roadmap for enhanced revision control capabilities.
|
||||
|
||||
See [ROADMAP.md](ROADMAP.md) for the platform roadmap and dependency tier structure.
|
||||
|
||||
---
|
||||
|
||||
@@ -25,7 +27,7 @@ This document analyzes the current state of the Silo project against its specifi
|
||||
| `docs/AUTH.md` | Authentication system design | Current |
|
||||
| `docs/AUTH_USER_GUIDE.md` | User guide for login, tokens, and roles | Current |
|
||||
| `docs/GAP_ANALYSIS.md` | Revision control roadmap | Current |
|
||||
| `ROADMAP.md` | Feature roadmap and SOLIDWORKS PDM comparison | Current |
|
||||
| `docs/ROADMAP.md` | Platform roadmap and dependency tiers | Current |
|
||||
| `frontend-spec.md` | React SPA frontend specification | Current |
|
||||
|
||||
### 1.2 Documentation Gaps (Priority Order)
|
||||
@@ -365,7 +367,7 @@ internal/
|
||||
handlers.go # Items, schemas, projects, revisions
|
||||
middleware.go # Auth middleware
|
||||
odoo_handlers.go # Odoo integration endpoints
|
||||
routes.go # Route registration (75 endpoints)
|
||||
routes.go # Route registration (78 endpoints)
|
||||
search.go # Fuzzy search
|
||||
auth/
|
||||
auth.go # Auth service: local, LDAP, OIDC
|
||||
@@ -450,3 +452,163 @@ GET /api/releases/{name} # Get release details
|
||||
POST /api/releases/{name}/items # Add items to release
|
||||
GET /api/items/{pn}/thumbnail/{rev} # Get thumbnail
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Appendix C: SOLIDWORKS PDM Comparison
|
||||
|
||||
This section compares Silo's capabilities against SOLIDWORKS PDM features. Gaps are categorized by priority and implementation complexity.
|
||||
|
||||
**Legend:** Silo Status = Full / Partial / None | Priority = Critical / High / Medium / Low | Complexity = Simple / Moderate / Complex
|
||||
|
||||
### C.1 Version Control & Revision Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Check-in/check-out | Full pessimistic locking | None | High | Moderate |
|
||||
| Version history | Complete with branching | Full (linear) | - | - |
|
||||
| Revision labels | A, B, C or custom schemes | Full (custom labels) | - | - |
|
||||
| Rollback/restore | Full | Full | - | - |
|
||||
| Compare revisions | Visual + metadata diff | Metadata diff only | Medium | Complex |
|
||||
| Get Latest Revision | One-click retrieval | Partial (API only) | Medium | Simple |
|
||||
|
||||
Silo lacks pessimistic locking (check-out), which is critical for multi-user CAD environments where file merging is impractical. Visual diff comparison would require FreeCAD integration for CAD file visualization.
|
||||
|
||||
### C.2 Workflow Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Custom workflows | Full visual designer | None | Critical | Complex |
|
||||
| State transitions | Configurable with permissions | Basic (status field only) | Critical | Complex |
|
||||
| Parallel approvals | Multiple approvers required | None | High | Complex |
|
||||
| Automatic transitions | Timer/condition-based | None | Medium | Moderate |
|
||||
| Email notifications | On state change | None | High | Moderate |
|
||||
| ECO process | Built-in change management | None | High | Complex |
|
||||
| Child state conditions | Block parent if children invalid | None | Medium | Moderate |
|
||||
|
||||
Workflow management is the largest functional gap. SOLIDWORKS PDM offers sophisticated state machines with parallel approvals, automatic transitions, and deep integration with engineering change processes. Silo currently has only a simple status field (draft/review/released/obsolete) with no transition rules or approval processes.
|
||||
|
||||
### C.3 User Management & Security
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| User authentication | Windows AD, LDAP | Full (local, LDAP, OIDC) | - | - |
|
||||
| Role-based permissions | Granular per folder/state | Partial (3-tier role model) | Medium | Moderate |
|
||||
| Group management | Full | None | Medium | Moderate |
|
||||
| Folder permissions | Read/write/delete per folder | None | Medium | Moderate |
|
||||
| State permissions | Actions allowed per state | None | High | Moderate |
|
||||
| Audit trail | Complete action logging | Full | - | - |
|
||||
| Private files | Pre-check-in visibility control | None | Low | Simple |
|
||||
|
||||
Authentication is implemented with three backends (local, LDAP/FreeIPA, OIDC/Keycloak) and a 3-tier role model (admin > editor > viewer). Audit logging captures user actions. Remaining gaps: group management, folder-level permissions, and state-based permission rules.
|
||||
|
||||
### C.4 Search & Discovery
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Metadata search | Full with custom cards | Partial (API query params + fuzzy) | High | Moderate |
|
||||
| Full-text content search | iFilters for Office, CAD | None | Medium | Complex |
|
||||
| Quick search | Toolbar with history | Partial (fuzzy search API) | Medium | Simple |
|
||||
| Saved searches | User-defined favorites | None | Medium | Simple |
|
||||
| Advanced operators | AND, OR, NOT, wildcards | None | Medium | Simple |
|
||||
| Multi-variable search | Search across multiple fields | None | Medium | Simple |
|
||||
| Where-used search | Find all assemblies using part | Full | - | - |
|
||||
|
||||
Silo has API-level filtering, fuzzy search, and where-used queries. Remaining gaps: saved searches, advanced search operators, and a richer search UI. Content search (searching within CAD files) is not planned for the server.
|
||||
|
||||
### C.5 BOM Management
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Single-level BOM | Yes | Full | - | - |
|
||||
| Multi-level BOM | Indented/exploded views | Full (recursive, configurable depth) | - | - |
|
||||
| BOM comparison | Between revisions | None | Medium | Moderate |
|
||||
| BOM export | Excel, XML, ERP formats | Full (CSV, ODS) | - | - |
|
||||
| BOM import | Bulk BOM loading | Full (CSV with upsert) | - | - |
|
||||
| Calculated BOMs | Quantities rolled up | None | Medium | Moderate |
|
||||
| Reference designators | Full support | Full | - | - |
|
||||
| Alternate parts | Substitute tracking | Full | - | - |
|
||||
|
||||
Multi-level BOM retrieval (recursive CTE with configurable depth) and BOM export (CSV, ODS) are implemented. BOM import supports CSV with upsert and cycle detection. Remaining gap: BOM comparison between revisions.
|
||||
|
||||
### C.6 CAD Integration
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Native CAD add-in | Deep SOLIDWORKS integration | FreeCAD workbench (silo-mod) | Medium | Complex |
|
||||
| Property mapping | Bi-directional sync | Planned (silo-mod) | Medium | Moderate |
|
||||
| Task pane | Embedded in CAD UI | Auth dock panel (silo-mod) | Medium | Complex |
|
||||
| Lightweight components | Handle without full load | N/A | - | - |
|
||||
| Drawing/model linking | Automatic association | Manual | Medium | Moderate |
|
||||
| Multi-CAD support | Third-party formats | FreeCAD only | Low | - |
|
||||
|
||||
CAD integration is maintained in separate repositories ([silo-mod](https://git.kindred-systems.com/kindred/silo-mod), [silo-calc](https://git.kindred-systems.com/kindred/silo-calc)). The Silo server provides the REST API endpoints consumed by those clients.
|
||||
|
||||
### C.7 External Integrations
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| ERP integration | SAP, Dynamics, etc. | Partial (Odoo stubs) | Medium | Complex |
|
||||
| API access | Full COM/REST API | Full REST API (78 endpoints) | - | - |
|
||||
| Dispatch scripts | Automation without coding | None | Medium | Moderate |
|
||||
| Task scheduler | Background processing | None | Medium | Moderate |
|
||||
| Email system | SMTP integration | None | High | Simple |
|
||||
| Web portal | Browser access | Full (React SPA + auth) | - | - |
|
||||
|
||||
Silo has a comprehensive REST API (78 endpoints) and a full web UI with authentication. Odoo ERP integration has config/sync-log scaffolding but push/pull operations are stubs. Remaining gaps: email notifications, task scheduler, dispatch automation.
|
||||
|
||||
### C.8 Reporting & Analytics
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| Standard reports | Inventory, usage, activity | None | Medium | Moderate |
|
||||
| Custom reports | User-defined queries | None | Medium | Moderate |
|
||||
| Dashboard | Visual KPIs | None | Low | Moderate |
|
||||
| Export formats | PDF, Excel, CSV | CSV and ODS | Medium | Simple |
|
||||
|
||||
Reporting capabilities are absent. Basic reports (item counts, revision activity, where-used) would provide immediate value.
|
||||
|
||||
### C.9 File Handling
|
||||
|
||||
| Feature | SOLIDWORKS PDM | Silo Status | Priority | Complexity |
|
||||
|---------|---------------|-------------|----------|------------|
|
||||
| File versioning | Automatic | Full (MinIO) | - | - |
|
||||
| File preview | Thumbnails, 3D preview | None | Medium | Complex |
|
||||
| File conversion | PDF, DXF generation | None | Medium | Complex |
|
||||
| Replication | Multi-site sync | None | Low | Complex |
|
||||
| File copy with refs | Copy tree with references | None | Medium | Moderate |
|
||||
|
||||
File storage works well. Thumbnail generation and file preview would significantly improve the web UI experience. Automatic conversion to PDF/DXF is valuable for sharing with non-CAD users.
|
||||
|
||||
---
|
||||
|
||||
## Appendix D: Feature Comparison Matrix
|
||||
|
||||
| Category | Feature | SW PDM Standard | SW PDM Pro | Silo Current | Silo Planned |
|
||||
|----------|---------|-----------------|------------|--------------|--------------|
|
||||
| **Version Control** | Check-in/out | Yes | Yes | No | Tier 1 |
|
||||
| | Version history | Yes | Yes | Yes | - |
|
||||
| | Rollback | Yes | Yes | Yes | - |
|
||||
| | Revision labels/status | Yes | Yes | Yes | - |
|
||||
| | Revision comparison | Yes | Yes | Yes (metadata) | - |
|
||||
| **Workflow** | Custom workflows | Limited | Yes | No | Tier 4 |
|
||||
| | Parallel approval | No | Yes | No | Tier 4 |
|
||||
| | Notifications | No | Yes | No | Tier 1 |
|
||||
| **Security** | User auth | Windows | Windows/LDAP | Yes (local, LDAP, OIDC) | - |
|
||||
| | Permissions | Basic | Granular | Partial (role-based) | Tier 4 |
|
||||
| | Audit trail | Basic | Full | Yes | - |
|
||||
| **Search** | Metadata search | Yes | Yes | Partial (API + fuzzy) | Tier 0 |
|
||||
| | Content search | No | Yes | No | Tier 2 |
|
||||
| | Where-used | Yes | Yes | Yes | - |
|
||||
| **BOM** | Single-level | Yes | Yes | Yes | - |
|
||||
| | Multi-level | Yes | Yes | Yes (recursive) | - |
|
||||
| | BOM export | Yes | Yes | Yes (CSV, ODS) | - |
|
||||
| **Data** | CSV import/export | Yes | Yes | Yes | - |
|
||||
| | ODS import/export | No | No | Yes | - |
|
||||
| | Project management | Yes | Yes | Yes | - |
|
||||
| **Integration** | API | Limited | Full | Full REST (78) | - |
|
||||
| | ERP connectors | No | Yes | Partial (Odoo stubs) | Tier 6 |
|
||||
| | Web access | No | Yes | Yes (React SPA + auth) | - |
|
||||
| **Files** | Versioning | Yes | Yes | Yes | - |
|
||||
| | Preview | Yes | Yes | No | Tier 2 |
|
||||
| | Multi-site | No | Yes | No | Not Planned |
|
||||
|
||||
518
docs/INSTALL.md
Normal file
518
docs/INSTALL.md
Normal file
@@ -0,0 +1,518 @@
|
||||
# Installing Silo
|
||||
|
||||
This guide covers two installation methods:
|
||||
|
||||
- **[Option A: Docker Compose](#option-a-docker-compose)** — self-contained stack with all services. Recommended for evaluation, small teams, and environments where Docker is the standard.
|
||||
- **[Option B: Daemon Install](#option-b-daemon-install-systemd--external-services)** — systemd service with external PostgreSQL, MinIO, and optional LDAP/nginx. Recommended for production deployments integrated with existing infrastructure.
|
||||
|
||||
Both methods produce the same result: a running Silo server with a web UI, REST API, and authentication.
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Option A: Docker Compose](#option-a-docker-compose)
|
||||
- [A.1 Prerequisites](#a1-prerequisites)
|
||||
- [A.2 Clone the Repository](#a2-clone-the-repository)
|
||||
- [A.3 Run the Setup Script](#a3-run-the-setup-script)
|
||||
- [A.4 Start the Stack](#a4-start-the-stack)
|
||||
- [A.5 Verify the Installation](#a5-verify-the-installation)
|
||||
- [A.6 LDAP Users and Groups](#a6-ldap-users-and-groups)
|
||||
- [A.7 Optional: Enable Nginx Reverse Proxy](#a7-optional-enable-nginx-reverse-proxy)
|
||||
- [A.8 Stopping, Starting, and Upgrading](#a8-stopping-starting-and-upgrading)
|
||||
- [Option B: Daemon Install (systemd + External Services)](#option-b-daemon-install-systemd--external-services)
|
||||
- [B.1 Architecture Overview](#b1-architecture-overview)
|
||||
- [B.2 Prerequisites](#b2-prerequisites)
|
||||
- [B.3 Set Up External Services](#b3-set-up-external-services)
|
||||
- [B.4 Prepare the Host](#b4-prepare-the-host)
|
||||
- [B.5 Configure Credentials](#b5-configure-credentials)
|
||||
- [B.6 Deploy](#b6-deploy)
|
||||
- [B.7 Set Up Nginx and TLS](#b7-set-up-nginx-and-tls)
|
||||
- [B.8 Verify the Installation](#b8-verify-the-installation)
|
||||
- [B.9 Upgrading](#b9-upgrading)
|
||||
- [Post-Install Configuration](#post-install-configuration)
|
||||
- [Further Reading](#further-reading)
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Regardless of which method you choose:
|
||||
|
||||
- **Git** to clone the repository
|
||||
- A machine with at least **2 GB RAM** and **10 GB free disk**
|
||||
- Network access to pull container images or download Go/Node toolchains
|
||||
|
||||
---
|
||||
|
||||
## Option A: Docker Compose
|
||||
|
||||
A single Docker Compose file runs everything: PostgreSQL, MinIO, OpenLDAP, and Silo. An optional nginx container can be enabled for reverse proxying.
|
||||
|
||||
### A.1 Prerequisites
|
||||
|
||||
- [Docker Engine](https://docs.docker.com/engine/install/) 24+ with the [Compose plugin](https://docs.docker.com/compose/install/) (v2)
|
||||
- `openssl` (used by the setup script to generate secrets)
|
||||
|
||||
Verify your installation:
|
||||
|
||||
```bash
|
||||
docker --version # Docker Engine 24+
|
||||
docker compose version # Docker Compose v2+
|
||||
```
|
||||
|
||||
### A.2 Clone the Repository
|
||||
|
||||
```bash
|
||||
git clone https://git.kindred-systems.com/kindred/silo.git
|
||||
cd silo
|
||||
```
|
||||
|
||||
### A.3 Run the Setup Script
|
||||
|
||||
The setup script generates credentials and configuration files:
|
||||
|
||||
```bash
|
||||
./scripts/setup-docker.sh
|
||||
```
|
||||
|
||||
It prompts for:
|
||||
- Server domain (default: `localhost`)
|
||||
- PostgreSQL password (auto-generated if you press Enter)
|
||||
- MinIO credentials (auto-generated)
|
||||
- OpenLDAP admin password and initial user (auto-generated)
|
||||
- Silo local admin account (fallback when LDAP is unavailable)
|
||||
|
||||
For automated/CI environments, use non-interactive mode:
|
||||
|
||||
```bash
|
||||
./scripts/setup-docker.sh --non-interactive
|
||||
```
|
||||
|
||||
The script writes two files:
|
||||
- `deployments/.env` — secrets for Docker Compose
|
||||
- `deployments/config.docker.yaml` — Silo server configuration
|
||||
|
||||
### A.4 Start the Stack
|
||||
|
||||
```bash
|
||||
docker compose -f deployments/docker-compose.allinone.yaml up -d
|
||||
```
|
||||
|
||||
Wait for all services to become healthy:
|
||||
|
||||
```bash
|
||||
docker compose -f deployments/docker-compose.allinone.yaml ps
|
||||
```
|
||||
|
||||
You should see `silo-postgres`, `silo-minio`, `silo-openldap`, and `silo-api` all in a healthy state.
|
||||
|
||||
View logs:
|
||||
|
||||
```bash
|
||||
# All services
|
||||
docker compose -f deployments/docker-compose.allinone.yaml logs -f
|
||||
|
||||
# Silo only
|
||||
docker compose -f deployments/docker-compose.allinone.yaml logs -f silo
|
||||
```
|
||||
|
||||
### A.5 Verify the Installation
|
||||
|
||||
```bash
|
||||
# Health check
|
||||
curl http://localhost:8080/health
|
||||
|
||||
# Readiness check (includes database and storage connectivity)
|
||||
curl http://localhost:8080/ready
|
||||
```
|
||||
|
||||
Open http://localhost:8080 in your browser. Log in with either:
|
||||
|
||||
- **LDAP account**: the username and password shown by the setup script (default username: `siloadmin`)
|
||||
- **Local admin**: the local admin credentials shown by the setup script (default username: `admin`)
|
||||
|
||||
The credentials were printed at the end of the setup script output and are stored in `deployments/.env`.
|
||||
|
||||
### A.6 LDAP Users and Groups
|
||||
|
||||
The Docker stack includes an OpenLDAP server with three preconfigured groups that map to Silo roles:
|
||||
|
||||
| LDAP Group | Silo Role | Access Level |
|
||||
|------------|-----------|-------------|
|
||||
| `cn=silo-admins,ou=groups,dc=silo,dc=local` | admin | Full access |
|
||||
| `cn=silo-users,ou=groups,dc=silo,dc=local` | editor | Create and modify items |
|
||||
| `cn=silo-viewers,ou=groups,dc=silo,dc=local` | viewer | Read-only |
|
||||
|
||||
The initial LDAP user (default: `siloadmin`) is added to `silo-admins`.
|
||||
|
||||
**Add a new LDAP user:**
|
||||
|
||||
```bash
|
||||
# From the host (using the exposed port)
|
||||
ldapadd -x -H ldap://localhost:1389 \
|
||||
-D "cn=admin,dc=silo,dc=local" \
|
||||
-w "YOUR_LDAP_ADMIN_PASSWORD" << EOF
|
||||
dn: cn=jdoe,ou=users,dc=silo,dc=local
|
||||
objectClass: inetOrgPerson
|
||||
cn: jdoe
|
||||
sn: Doe
|
||||
userPassword: changeme
|
||||
mail: jdoe@example.com
|
||||
EOF
|
||||
```
|
||||
|
||||
**Add a user to a group:**
|
||||
|
||||
```bash
|
||||
ldapmodify -x -H ldap://localhost:1389 \
|
||||
-D "cn=admin,dc=silo,dc=local" \
|
||||
-w "YOUR_LDAP_ADMIN_PASSWORD" << EOF
|
||||
dn: cn=silo-users,ou=groups,dc=silo,dc=local
|
||||
changetype: modify
|
||||
add: member
|
||||
member: cn=jdoe,ou=users,dc=silo,dc=local
|
||||
EOF
|
||||
```
|
||||
|
||||
**List all users:**
|
||||
|
||||
```bash
|
||||
ldapsearch -x -H ldap://localhost:1389 \
|
||||
-b "ou=users,dc=silo,dc=local" \
|
||||
-D "cn=admin,dc=silo,dc=local" \
|
||||
-w "YOUR_LDAP_ADMIN_PASSWORD" "(objectClass=inetOrgPerson)" cn mail memberOf
|
||||
```
|
||||
|
||||
### A.7 Optional: Enable Nginx Reverse Proxy
|
||||
|
||||
To place nginx in front of Silo (for TLS termination or to serve on port 80):
|
||||
|
||||
```bash
|
||||
docker compose -f deployments/docker-compose.allinone.yaml --profile nginx up -d
|
||||
```
|
||||
|
||||
By default nginx listens on ports 80 and 443 and proxies to the Silo container. The configuration is at `deployments/nginx/nginx.conf`.
|
||||
|
||||
**To enable HTTPS**, edit `deployments/docker-compose.allinone.yaml` and uncomment the TLS certificate volume mounts in the `nginx` service, then uncomment the HTTPS server block in `deployments/nginx/nginx.conf`. See the comments in those files for details.
|
||||
|
||||
If you already have your own reverse proxy or load balancer, skip the nginx profile and point your proxy at port 8080.
|
||||
|
||||
### A.8 Stopping, Starting, and Upgrading
|
||||
|
||||
```bash
|
||||
# Stop the stack (data is preserved in Docker volumes)
|
||||
docker compose -f deployments/docker-compose.allinone.yaml down
|
||||
|
||||
# Start again
|
||||
docker compose -f deployments/docker-compose.allinone.yaml up -d
|
||||
|
||||
# Stop and delete all data (WARNING: destroys database, files, and LDAP data)
|
||||
docker compose -f deployments/docker-compose.allinone.yaml down -v
|
||||
```
|
||||
|
||||
**To upgrade to a newer version:**
|
||||
|
||||
```bash
|
||||
cd silo
|
||||
git pull
|
||||
docker compose -f deployments/docker-compose.allinone.yaml up -d --build
|
||||
```
|
||||
|
||||
The Silo container is rebuilt from the updated source. Database migrations in `migrations/` are applied automatically on container startup via the PostgreSQL init mechanism.
|
||||
|
||||
---
|
||||
|
||||
## Option B: Daemon Install (systemd + External Services)
|
||||
|
||||
This method runs Silo as a systemd service on a dedicated host, connecting to externally managed PostgreSQL, MinIO, and optionally LDAP services.
|
||||
|
||||
### B.1 Architecture Overview
|
||||
|
||||
```
|
||||
┌──────────────────────┐
|
||||
│ Silo Host │
|
||||
│ ┌────────────────┐ │
|
||||
HTTPS (443) ──►│ │ nginx │ │
|
||||
│ └───────┬────────┘ │
|
||||
│ │ :8080 │
|
||||
│ ┌───────▼────────┐ │
|
||||
│ │ silod │ │
|
||||
│ │ (API server) │ │
|
||||
│ └──┬─────────┬───┘ │
|
||||
└─────┼─────────┼──────┘
|
||||
│ │
|
||||
┌───────────▼──┐ ┌───▼──────────────┐
|
||||
│ PostgreSQL 16│ │ MinIO (S3) │
|
||||
│ :5432 │ │ :9000 API │
|
||||
└──────────────┘ │ :9001 Console │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
### B.2 Prerequisites
|
||||
|
||||
- Linux host (Debian/Ubuntu or RHEL/Fedora/AlmaLinux)
|
||||
- Root or sudo access
|
||||
- Network access to your PostgreSQL and MinIO servers
|
||||
|
||||
The setup script installs Go and other build dependencies automatically.
|
||||
|
||||
### B.3 Set Up External Services
|
||||
|
||||
#### PostgreSQL 16
|
||||
|
||||
Install PostgreSQL and create the Silo database:
|
||||
|
||||
- [PostgreSQL downloads](https://www.postgresql.org/download/)
|
||||
|
||||
```bash
|
||||
# After installing PostgreSQL, create the database and user:
|
||||
sudo -u postgres createuser silo
|
||||
sudo -u postgres createdb -O silo silo
|
||||
sudo -u postgres psql -c "ALTER USER silo WITH PASSWORD 'your-password';"
|
||||
```
|
||||
|
||||
Ensure the Silo host can connect (check `pg_hba.conf` on the PostgreSQL server).
|
||||
|
||||
Verify:
|
||||
|
||||
```bash
|
||||
psql -h YOUR_PG_HOST -U silo -d silo -c 'SELECT 1'
|
||||
```
|
||||
|
||||
#### MinIO
|
||||
|
||||
Install MinIO and create a bucket and service account:
|
||||
|
||||
- [MinIO quickstart](https://min.io/docs/minio/linux/index.html)
|
||||
|
||||
```bash
|
||||
# Using the MinIO client (mc) — replace the `minioadmin minioadmin` defaults below with your MinIO root credentials:
|
||||
mc alias set local http://YOUR_MINIO_HOST:9000 minioadmin minioadmin
|
||||
mc mb local/silo-files
|
||||
mc admin user add local silouser YOUR_MINIO_SECRET
|
||||
mc admin policy attach local readwrite --user silouser
|
||||
```
|
||||
|
||||
Verify:
|
||||
|
||||
```bash
|
||||
curl -I http://YOUR_MINIO_HOST:9000/minio/health/live
|
||||
```
|
||||
|
||||
#### LDAP / FreeIPA (Optional)
|
||||
|
||||
For LDAP authentication, you need an LDAP server with user and group entries. Options:
|
||||
|
||||
- [FreeIPA](https://www.freeipa.org/page/Quick_Start_Guide) — full identity management (recommended for organizations already using it)
|
||||
- [OpenLDAP](https://www.openldap.org/doc/admin26/) — lightweight LDAP server
|
||||
|
||||
Silo needs:
|
||||
- A base DN (e.g., `dc=example,dc=com`)
|
||||
- Users under a known OU (e.g., `cn=users,cn=accounts,dc=example,dc=com`)
|
||||
- Groups that map to Silo roles (`admin`, `editor`, `viewer`)
|
||||
- The `memberOf` overlay enabled (so user entries have `memberOf` attributes)
|
||||
|
||||
See [CONFIGURATION.md — LDAP](CONFIGURATION.md#ldap--freeipa) for the full LDAP configuration reference.
|
||||
|
||||
### B.4 Prepare the Host
|
||||
|
||||
Run the setup script on the target host:
|
||||
|
||||
```bash
|
||||
# Copy and run the script
|
||||
scp scripts/setup-host.sh root@YOUR_HOST:/tmp/
|
||||
ssh root@YOUR_HOST 'bash /tmp/setup-host.sh'
|
||||
```
|
||||
|
||||
Or directly on the host:
|
||||
|
||||
```bash
|
||||
sudo bash scripts/setup-host.sh
|
||||
```
|
||||
|
||||
The script:
|
||||
1. Installs dependencies (git, Go 1.24)
|
||||
2. Creates the `silo` system user
|
||||
3. Creates directories (`/opt/silo`, `/etc/silo`)
|
||||
4. Clones the repository
|
||||
5. Creates the environment file template
|
||||
|
||||
To override the default service hostnames:
|
||||
|
||||
```bash
|
||||
SILO_DB_HOST=db.example.com SILO_MINIO_HOST=s3.example.com sudo -E bash scripts/setup-host.sh
|
||||
```
|
||||
|
||||
### B.5 Configure Credentials
|
||||
|
||||
Edit the environment file with your service credentials:
|
||||
|
||||
```bash
|
||||
sudo nano /etc/silo/silod.env
|
||||
```
|
||||
|
||||
```bash
|
||||
# Database
|
||||
SILO_DB_PASSWORD=your-database-password
|
||||
|
||||
# MinIO
|
||||
SILO_MINIO_ACCESS_KEY=silouser
|
||||
SILO_MINIO_SECRET_KEY=your-minio-secret
|
||||
|
||||
# Authentication
|
||||
SILO_SESSION_SECRET=generate-a-long-random-string
|
||||
SILO_ADMIN_USERNAME=admin
|
||||
SILO_ADMIN_PASSWORD=your-admin-password
|
||||
```
|
||||
|
||||
Generate a session secret:
|
||||
|
||||
```bash
|
||||
openssl rand -hex 32
|
||||
```
|
||||
|
||||
Review the server configuration:
|
||||
|
||||
```bash
|
||||
sudo nano /etc/silo/config.yaml
|
||||
```
|
||||
|
||||
Update `database.host`, `storage.endpoint`, `server.base_url`, and authentication settings for your environment. See [CONFIGURATION.md](CONFIGURATION.md) for all options.
|
||||
|
||||
### B.6 Deploy
|
||||
|
||||
Run the deploy script:
|
||||
|
||||
```bash
|
||||
sudo /opt/silo/src/scripts/deploy.sh
|
||||
```
|
||||
|
||||
The script:
|
||||
1. Pulls latest code from git
|
||||
2. Builds the `silod` binary and React frontend
|
||||
3. Installs files to `/opt/silo` and `/etc/silo`
|
||||
4. Runs database migrations
|
||||
5. Installs and starts the systemd service
|
||||
|
||||
Deploy options:
|
||||
|
||||
```bash
|
||||
# Skip git pull (use current checkout)
|
||||
sudo /opt/silo/src/scripts/deploy.sh --no-pull
|
||||
|
||||
# Skip build (use existing binary)
|
||||
sudo /opt/silo/src/scripts/deploy.sh --no-build
|
||||
|
||||
# Just restart the service
|
||||
sudo /opt/silo/src/scripts/deploy.sh --restart-only
|
||||
|
||||
# Check service status
|
||||
sudo /opt/silo/src/scripts/deploy.sh --status
|
||||
```
|
||||
|
||||
To override the target host or database host:
|
||||
|
||||
```bash
|
||||
SILO_DEPLOY_TARGET=silo.example.com SILO_DB_HOST=db.example.com sudo -E scripts/deploy.sh
|
||||
```
|
||||
|
||||
### B.7 Set Up Nginx and TLS
|
||||
|
||||
#### With FreeIPA (automated)
|
||||
|
||||
If your organization uses FreeIPA, the included script handles nginx setup, IPA enrollment, and certificate issuance:
|
||||
|
||||
```bash
|
||||
sudo /opt/silo/src/scripts/setup-ipa-nginx.sh
|
||||
```
|
||||
|
||||
Override the hostname if needed:
|
||||
|
||||
```bash
|
||||
SILO_HOSTNAME=silo.example.com sudo -E /opt/silo/src/scripts/setup-ipa-nginx.sh
|
||||
```
|
||||
|
||||
The script installs nginx, enrolls the host in FreeIPA, requests a TLS certificate from the IPA CA (auto-renewed by certmonger), and configures nginx as an HTTPS reverse proxy.
|
||||
|
||||
#### Manual nginx setup
|
||||
|
||||
Install nginx and create a config:
|
||||
|
||||
```bash
|
||||
sudo apt install nginx # or: sudo dnf install nginx
|
||||
```
|
||||
|
||||
Use the template at `deployments/nginx/nginx.conf` as a starting point. Copy it to `/etc/nginx/sites-available/silo` (on RHEL-family systems, which have no `sites-available`/`sites-enabled`, copy it to `/etc/nginx/conf.d/silo.conf` and skip the symlink step below), update the `server_name` and certificate paths, then enable it:
|
||||
|
||||
```bash
|
||||
sudo ln -sf /etc/nginx/sites-available/silo /etc/nginx/sites-enabled/silo
|
||||
sudo nginx -t
|
||||
sudo systemctl reload nginx
|
||||
```
|
||||
|
||||
After enabling HTTPS, update `server.base_url` in `/etc/silo/config.yaml` to use `https://` and restart Silo:
|
||||
|
||||
```bash
|
||||
sudo systemctl restart silod
|
||||
```
|
||||
|
||||
### B.8 Verify the Installation
|
||||
|
||||
```bash
|
||||
# Service status
|
||||
sudo systemctl status silod
|
||||
|
||||
# Health check
|
||||
curl http://localhost:8080/health
|
||||
|
||||
# Readiness check
|
||||
curl http://localhost:8080/ready
|
||||
|
||||
# Follow logs
|
||||
sudo journalctl -u silod -f
|
||||
```
|
||||
|
||||
Open your configured base URL in a browser and log in.
|
||||
|
||||
### B.9 Upgrading
|
||||
|
||||
```bash
|
||||
# Pull latest code and redeploy
|
||||
sudo /opt/silo/src/scripts/deploy.sh
|
||||
|
||||
# Or deploy a specific version
|
||||
cd /opt/silo/src
|
||||
git fetch --all --tags
|
||||
git checkout v1.2.3
|
||||
sudo /opt/silo/src/scripts/deploy.sh --no-pull
|
||||
```
|
||||
|
||||
New database migrations are applied automatically during deployment.
|
||||
|
||||
---
|
||||
|
||||
## Post-Install Configuration
|
||||
|
||||
After a successful installation:
|
||||
|
||||
- **Authentication**: Configure LDAP, OIDC, or local auth backends. See [CONFIGURATION.md — Authentication](CONFIGURATION.md#authentication).
|
||||
- **Schemas**: Part numbering schemas are loaded from YAML files. See the `schemas/` directory and [CONFIGURATION.md — Schemas](CONFIGURATION.md#schemas).
|
||||
- **Read-only mode**: Toggle write protection at runtime with `kill -USR1 $(pidof silod)` or by setting `server.read_only: true` in the config.
|
||||
- **Ongoing maintenance**: See [DEPLOYMENT.md](DEPLOYMENT.md) for service management, log viewing, troubleshooting, and the security checklist.
|
||||
|
||||
---
|
||||
|
||||
## Further Reading
|
||||
|
||||
| Document | Description |
|
||||
|----------|-------------|
|
||||
| [CONFIGURATION.md](CONFIGURATION.md) | Complete `config.yaml` reference |
|
||||
| [DEPLOYMENT.md](DEPLOYMENT.md) | Operations guide: maintenance, troubleshooting, security |
|
||||
| [AUTH.md](AUTH.md) | Authentication system design |
|
||||
| [AUTH_USER_GUIDE.md](AUTH_USER_GUIDE.md) | User guide for login, tokens, and roles |
|
||||
| [SPECIFICATION.md](SPECIFICATION.md) | Full design specification and API reference |
|
||||
| [STATUS.md](STATUS.md) | Implementation status |
|
||||
| [GAP_ANALYSIS.md](GAP_ANALYSIS.md) | Gap analysis and revision control roadmap |
|
||||
| [COMPONENT_AUDIT.md](COMPONENT_AUDIT.md) | Component audit tool design |
|
||||
442
docs/ROADMAP.md
Normal file
442
docs/ROADMAP.md
Normal file
@@ -0,0 +1,442 @@
|
||||
# Silo Platform Roadmap
|
||||
|
||||
**Version:** 2.0
|
||||
**Date:** February 2026
|
||||
|
||||
Silo is the server component of the Kindred ecosystem. Its core function is storing and version-controlling engineering data (parts, assemblies, BOMs). This roadmap describes the expansion of Silo from a PDM server into a modular platform -- comparable to how Gitea/GitHub extend Git hosting with Actions, Wikis, Packages, and webhooks.
|
||||
|
||||
For a detailed comparison against SOLIDWORKS PDM, see [GAP_ANALYSIS.md](GAP_ANALYSIS.md).
|
||||
|
||||
---
|
||||
|
||||
## Guiding Principles
|
||||
|
||||
- **Modular architecture.** Every capability beyond core PDM is a module. Modules register against a central API endpoint registry and declare their menu entries, views, dependencies, and routes via a module manifest.
|
||||
- **Odoo-aligned UX.** The web UI follows Odoo's navigation patterns: a top-level app launcher grid, breadcrumb navigation (`Module > List > Record > Sub-view`), and standard view types (list, form, kanban, calendar, pivot). This alignment provides a familiar experience for shops already using Odoo as their ERP, and a clean integration path for those who adopt it later.
|
||||
- **Open by default.** Silo and all modules are open-source. Enterprise customers can fork, extend, and self-host. Developer tools for building and distributing custom Create forks are available to everyone, not just Kindred.
|
||||
- **Odoo as reference ERP.** For shops on Odoo, a bridge module syncs Silo data to Odoo models (`mrp.bom`, `mrp.production`, `quality.check`, etc.). For shops on other ERPs, the open API serves as a documented integration surface. Silo's web UI is fully self-sufficient with no ERP dependency required.
|
||||
|
||||
---
|
||||
|
||||
## Foundational Contracts
|
||||
|
||||
### The .kc File Format
|
||||
|
||||
Silo introduces the `.kc` file format as an enhanced superset of FreeCAD's `.fcstd`. Both are ZIP bundles. A `.kc` file contains everything an `.fcstd` does, plus a `silo/` directory with platform metadata.
|
||||
|
||||
#### Standard FCStd contents (preserved as-is)
|
||||
|
||||
- `Document.xml`, `GuiDocument.xml`
|
||||
- BREP geometry files (`.brp`)
|
||||
- `thumbnails/`
|
||||
|
||||
#### Added .kc entries
|
||||
|
||||
| Path | Purpose |
|
||||
|------|---------|
|
||||
| `silo/manifest.json` | Silo instance origin, part UUID, revision hash, .kc schema version |
|
||||
| `silo/metadata.json` | Custom schema field values, tags, lifecycle state |
|
||||
| `silo/history.json` | Local revision log (lightweight; full history is server-side) |
|
||||
| `silo/approvals.json` | ECO/approval state snapshot |
|
||||
| `silo/dependencies.json` | Assembly link references by Silo UUID (not filepath) |
|
||||
| `silo/macros/` | Embedded macro references or inline scripts bound to this part |
|
||||
| `silo/inspection/` | GD&T annotations, tolerance data, CMM linkage metadata |
|
||||
| `silo/thumbnails/` | Silo-generated renderings (separate from FreeCAD's built-in thumbnail) |
|
||||
|
||||
#### Interoperability
|
||||
|
||||
- **FCStd -> Silo:** On import, the `silo/` directory is generated with defaults. A UUID is assigned and the user is prompted for schema fields.
|
||||
- **Silo -> FCStd:** On export, the `silo/` directory is stripped. The remaining contents are a valid `.fcstd`.
|
||||
- **Round-trip safety:** FreeCAD ignores the `silo/` directory on save, so there is no risk of FreeCAD corrupting Silo metadata.
|
||||
- **Schema versioning:** `silo/manifest.json` carries a format version for forward-compatible migrations.
|
||||
|
||||
### Module Manifest
|
||||
|
||||
Each module ships a manifest declaring its integration surface:
|
||||
|
||||
```
|
||||
id, name, version, description
|
||||
dependencies (other module IDs)
|
||||
menu_entries (app launcher icon, label, route)
|
||||
view_declarations (list, form, kanban, etc.)
|
||||
api_routes (REST endpoints the module registers)
|
||||
hooks (events the module listens to or emits)
|
||||
permissions (required roles/scopes)
|
||||
```
|
||||
|
||||
The exact format (JSON, TOML, or Python-based à la Odoo's `__manifest__.py`) is TBD. The contract is: a module is anything that provides a valid manifest and registers against the endpoint registry.
|
||||
|
||||
### Web UI Shell
|
||||
|
||||
The Silo web application provides the chrome that all modules render within.
|
||||
|
||||
- **App launcher:** Top-level grid of installed module icons. Driven by the API endpoint registry -- only enabled modules appear. Disabled modules show greyed with an "Enable" action for discoverability.
|
||||
- **Breadcrumbs:** Every view follows `Module > List > Record > Sub-view`. Consistent across all modules.
|
||||
- **View types:** List, form, kanban, calendar, pivot/reporting. Modules declare supported views in their manifest.
|
||||
- **Schema-driven forms:** The user-customizable schema engine maps directly to form views, enabling end-users to define part metadata fields through the web UI without code changes.
|
||||
|
||||
---
|
||||
|
||||
## Dependency Tiers
|
||||
|
||||
Modules are organized into tiers based on what they depend on. Lower tiers must be stable before higher tiers are built.
|
||||
|
||||
### Tier 0 -- Foundation
|
||||
|
||||
Everything depends on these. They define what Silo *is*.
|
||||
|
||||
| Component | Description | Status |
|
||||
|-----------|-------------|--------|
|
||||
| **Core Silo** | Part/assembly storage, version control, auth, base REST API | Complete |
|
||||
| **.kc Format Spec** | File format contract between Create and Silo | Not Started |
|
||||
| **API Endpoint Registry** | Module discovery, dynamic UI rendering, health checks | Not Started |
|
||||
| **Web UI Shell** | App launcher, breadcrumbs, view framework, module rendering | Partial |
|
||||
| **Python Scripting Engine** | Server-side hook execution, module extension point | Not Started |
|
||||
| **Job Queue Infrastructure** | Redis/NATS shared async service for all compute modules | Not Started |
|
||||
|
||||
### Tier 1 -- Core Services
|
||||
|
||||
Broad downstream dependencies. These should be built early because retrofitting is painful.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **Headless Create** | API-driven FreeCAD instance for file manipulation, geometry queries, format conversion, rendering | Core Silo, Job Queue | Not Started |
|
||||
| **Notifications & Subscriptions** | Per-part watch lists, lifecycle event hooks, webhook delivery | Core Silo, Registry | Not Started |
|
||||
| **Audit Trail / Compliance** | ITAR, ISO 9001, AS9100 traceability; module-level event journaling | Core Silo | Partial |
|
||||
|
||||
### Tier 2 -- File Intelligence & Collaboration
|
||||
|
||||
High-visibility features. Mostly low-hanging fruit once Tier 1 is solid.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **Intelligent FCStd Diffing** | XML-based structural diff of .kc bundles | Headless Create | Not Started |
|
||||
| **Thumbnail Generation** | Auto-rendered part/assembly previews | Headless Create | Not Started |
|
||||
| **Macro Store** | Shared macro library across Create instances | Core Silo, Registry | Not Started |
|
||||
| **Theme & Addon Manager** | Centralized distribution of UI themes and workbench addons | Core Silo, Registry | Not Started |
|
||||
| **User-Customizable Schemas** | End-user defined part/form metadata via web UI | Core Silo, Scripting Engine | Not Started |
|
||||
|
||||
### Tier 3 -- Compute
|
||||
|
||||
Heavy async workloads. All route through the shared job queue.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **Batch Jobs (CPU/GPU)** | FEA, CFD, rendering, bulk export | Job Queue, Headless Create | Not Started |
|
||||
| **AI Broker** | LLM tasks (Ollama), GNN constraint optimization, appearance AI | Job Queue | Not Started |
|
||||
| **Reporting & Analytics** | Part reuse, revision frequency, compute usage dashboards, cost roll-ups | Audit Trail, Core Silo | Not Started |
|
||||
|
||||
### Tier 4 -- Engineering Workflow
|
||||
|
||||
Process modules that formalize how engineering work moves through an organization.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **Approval / ECO Workflow** | Engineering change orders, multi-stage review gates, digital signatures | Notifications, Audit Trail, Schemas | Not Started |
|
||||
| **Shop Floor Drawing Distribution** | Controlled push-to-production drawings; web-based appliance displays on the floor | Headless Create, Approval Workflow | Not Started |
|
||||
| **Import/Export Bridge** | STEP, IGES, 3MF connectors; SOLIDWORKS migration tooling; ERP adapters | Headless Create | Not Started |
|
||||
| **Multi-tenant / Org Management** | Org boundaries, role-based permissioning, storage quotas | Core Auth, Audit Trail | Not Started |
|
||||
|
||||
### Tier 5 -- Manufacturing & Quality
|
||||
|
||||
Deep domain modules. Heavy spec work required independent of software dependencies.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **MES Module** | Manufacturing execution -- internal module or bridge to external MES | Approval Workflow, Schemas, Shop Floor Drawings | Not Started |
|
||||
| **Quality / Tolerance Stackup** | Inspection data ingestion, CMM device linking, statistical tolerance analysis, material mapping | Schemas, Import Bridge | Not Started |
|
||||
| **Inspection Plan Generator** | Auto-generate CMM programs or inspection checklists from GD&T drawings | Headless Create, Quality Module | Not Started |
|
||||
| **BIM Inventory / Receiving** | Live facility model with real-time inventory location, explorable in a custom BIM-MES workbench in Create | Custom BIM-MES Workbench, Schemas, Notifications | Not Started |
|
||||
|
||||
### Tier 6 -- Platform & Ecosystem
|
||||
|
||||
Modules that serve the broader community and long-horizon use cases.
|
||||
|
||||
| Module | Description | Depends On | Status |
|
||||
|--------|-------------|------------|--------|
|
||||
| **Developer Tools** | Managed Gitea instance for in-house Create fork development; CI/CD to build and distribute fork updates to configured clients | Tier 0-1 stability | Not Started |
|
||||
| **Digital Twin Sync** | Live sensor data mapped onto BIM/assembly models; operational monitoring | BIM Inventory, Reporting | Not Started |
|
||||
| **ERP Adapters (Odoo, SAP, etc.)** | Bidirectional sync of parts, BOMs, ECOs, production orders to external ERP | Import/Export Bridge, MES, Schemas | Partial (Odoo stubs) |
|
||||
|
||||
---
|
||||
|
||||
## Near-Term Priorities
|
||||
|
||||
These are the concrete tasks that map to Tier 0 completion and the first steps into Tier 1. They replace the older Phase 1-6 calendar-based timelines.
|
||||
|
||||
### Tier 0 Completion
|
||||
|
||||
Complete MVP and stabilize core functionality.
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Unit test suite | Core API, database, partnum, file, CSV/ODS handler tests | Partial (~40%) |
|
||||
| Date segment type | Implement `date` segment with strftime-style formatting | Not Started |
|
||||
| Part number validation | Validate format against schema on creation | Not Started |
|
||||
| Location CRUD API | Expose location hierarchy via REST | Not Started |
|
||||
| Inventory API | Expose inventory operations via REST | Not Started |
|
||||
|
||||
**Success metrics:**
|
||||
- All existing tests pass
|
||||
- File upload/download works end-to-end
|
||||
- FreeCAD users can checkout, modify, commit parts
|
||||
|
||||
### Multi-User Enablement
|
||||
|
||||
Enable team collaboration (feeds into Tier 1 and Tier 4).
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Check-out locking | Pessimistic locks with timeout | Not Started |
|
||||
| User/group management | Create, assign, manage users and groups | Not Started |
|
||||
| Folder permissions | Read/write/delete per folder hierarchy | Not Started |
|
||||
|
||||
**Success metrics:**
|
||||
- 5+ concurrent users supported
|
||||
- No data corruption under concurrent access
|
||||
- Audit log captures all modifications
|
||||
|
||||
### Workflow Engine
|
||||
|
||||
Implement engineering change processes (Tier 4: Approval/ECO Workflow).
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Workflow designer | YAML-defined state machines | Not Started |
|
||||
| State transitions | Configurable transition rules with permissions | Not Started |
|
||||
| Approval workflows | Single and parallel approver gates | Not Started |
|
||||
| Email notifications | SMTP integration for alerts on state changes | Not Started |
|
||||
|
||||
**Success metrics:**
|
||||
- Engineering change process completable in Silo
|
||||
- Email notifications delivered reliably
|
||||
- Workflow state visible in web UI
|
||||
|
||||
### Search & Discovery
|
||||
|
||||
Improve findability and navigation (Tier 0 Web UI Shell).
|
||||
|
||||
| Task | Description | Status |
|
||||
|------|-------------|--------|
|
||||
| Advanced search UI | Web interface with filters and operators | Not Started |
|
||||
| Saved searches | User-defined query favorites | Not Started |
|
||||
|
||||
**Success metrics:**
|
||||
- Search returns results in <2 seconds
|
||||
- Where-used queries complete in <5 seconds
|
||||
|
||||
---
|
||||
|
||||
## Gap Summary
|
||||
|
||||
For full SOLIDWORKS PDM comparison tables, see [GAP_ANALYSIS.md Appendix C](GAP_ANALYSIS.md#appendix-c-solidworks-pdm-comparison).
|
||||
|
||||
### Completed (Previously Critical/High)
|
||||
|
||||
1. ~~User authentication~~ -- local, LDAP, OIDC
|
||||
2. ~~Role-based permissions~~ -- 3-tier role model (admin/editor/viewer)
|
||||
3. ~~Audit trail~~ -- audit_log table with completeness scoring
|
||||
4. ~~Where-used search~~ -- reverse parent lookup API
|
||||
5. ~~Multi-level BOM API~~ -- recursive expansion with configurable depth
|
||||
6. ~~BOM export~~ -- CSV and ODS formats
|
||||
|
||||
### Critical Gaps (Required for Team Use)
|
||||
|
||||
1. **Workflow engine** -- state machines with transitions and approvals
|
||||
2. **Check-out locking** -- pessimistic locking for CAD files
|
||||
|
||||
### High Priority Gaps (Significant Value)
|
||||
|
||||
1. **Email notifications** -- alert users on state changes
|
||||
2. **Web UI search** -- advanced search interface with saved searches
|
||||
3. **Folder/state permissions** -- granular access control beyond role model
|
||||
|
||||
### Medium Priority Gaps (Nice to Have)
|
||||
|
||||
1. **Saved searches** -- frequently used queries
|
||||
2. **File preview/thumbnails** -- visual browsing
|
||||
3. **Reporting** -- activity and inventory reports
|
||||
4. **Scheduled tasks** -- background automation
|
||||
5. **BOM comparison** -- revision diff for assemblies
|
||||
|
||||
---
|
||||
|
||||
## Priority Notes
|
||||
|
||||
- **Headless Create** is the single highest-leverage Tier 1 item. It unblocks diffing, thumbnails, batch export, drawing distribution, and inspection plan generation.
|
||||
- **Audit Trail** is unglamorous but critical to build early. Retrofitting compliance logging after modules ship is expensive and error-prone.
|
||||
- **Tier 2** delivers visible, demo-able value quickly -- diffing, thumbnails, and the macro store are features users immediately understand.
|
||||
- **Tiers 5-6** carry heavy domain complexity. They need detailed specification and industry consultation well before implementation begins.
|
||||
- The **.kc format** and **module manifest** are the two foundational contracts. Getting these right determines how cleanly everything above them composes.
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **Module manifest format** -- JSON, TOML, or Python-based? Tradeoffs between simplicity and expressiveness.
|
||||
2. **.kc thumbnail policy** -- Single canonical thumbnail vs. multi-view renders. Impacts file size and generation cost.
|
||||
3. **Job queue technology** -- Redis Streams vs. NATS. Redis is already in the stack; NATS offers better pub/sub semantics for event-driven modules.
|
||||
4. **Headless Create deployment** -- Sidecar container per Silo instance, or pool of workers behind the job queue?
|
||||
5. **BIM-MES workbench scope** -- How much of FreeCAD BIM is reusable vs. needs to be purpose-built for inventory/facility modeling?
|
||||
6. **Offline .kc workflow** -- How much of the `silo/` metadata is authoritative when disconnected, and what is the reconciliation strategy on reconnect?
|
||||
|
||||
---
|
||||
|
||||
## Appendix A: Current Project Inventory
|
||||
|
||||
### Implemented Features (MVP Complete)
|
||||
|
||||
#### Core Database System
|
||||
- PostgreSQL schema with 13 migrations
|
||||
- UUID-based identifiers throughout
|
||||
- Soft delete support via `archived_at` timestamps
|
||||
- Atomic sequence generation for part numbers
|
||||
|
||||
#### Part Number Generation
|
||||
- YAML schema parser with validation
|
||||
- Segment types: `string`, `enum`, `serial`, `constant`
|
||||
- Scope templates for serial counters (e.g., `{category}`, `{project}`)
|
||||
- Format templates for custom output
|
||||
|
||||
#### Item Management
|
||||
- Full CRUD operations for items
|
||||
- Item types: part, assembly, drawing, document, tooling, purchased, electrical, software
|
||||
- Custom properties via JSONB storage
|
||||
- Project tagging with many-to-many relationships
|
||||
|
||||
#### Revision Control
|
||||
- Append-only revision history
|
||||
- Revision metadata: properties, file reference, checksum, comment
|
||||
- Status tracking: draft, review, released, obsolete
|
||||
- Labels/tags per revision
|
||||
- Revision comparison (diff)
|
||||
- Rollback functionality
|
||||
|
||||
#### File Management
|
||||
- MinIO integration with versioning
|
||||
- File upload/download via REST API
|
||||
- SHA256 checksums for integrity
|
||||
- Storage path: `items/{partNumber}/rev{N}.FCStd`
|
||||
|
||||
#### Bill of Materials (BOM)
|
||||
- Relationship types: component, alternate, reference
|
||||
- Multi-level BOM (recursive expansion with configurable depth)
|
||||
- Where-used queries (reverse parent lookup)
|
||||
- BOM CSV and ODS export/import with cycle detection
|
||||
- Reference designators for electronics
|
||||
- Quantity tracking with units
|
||||
- Revision-specific child linking
|
||||
|
||||
#### Project Management
|
||||
- Project CRUD operations
|
||||
- Unique project codes (2-10 characters)
|
||||
- Item-to-project tagging
|
||||
- Project-filtered queries
|
||||
|
||||
#### Data Import/Export
|
||||
- CSV export with configurable properties
|
||||
- CSV import with dry-run validation
|
||||
- ODS spreadsheet import/export (items, BOMs, project sheets)
|
||||
- Template generation for import formatting
|
||||
|
||||
#### API & Web Interface
|
||||
- REST API with 78 endpoints
|
||||
- Authentication: local (bcrypt), LDAP/FreeIPA, OIDC/Keycloak
|
||||
- Role-based access control (admin > editor > viewer)
|
||||
- API token management (SHA-256 hashed)
|
||||
- Session management (PostgreSQL-backed, 24h lifetime)
|
||||
- CSRF protection (nosurf on web forms)
|
||||
- Middleware: logging, CORS, recovery, request ID
|
||||
- Web UI -- React SPA (Vite + TypeScript, Catppuccin Mocha theme)
|
||||
- Fuzzy search
|
||||
- Health and readiness probes
|
||||
|
||||
#### Audit & Completeness
|
||||
- Audit logging (database table with user/action/resource tracking)
|
||||
- Item completeness scoring with weighted fields
|
||||
- Category-specific property validation
|
||||
- Tier classification (critical/low/partial/good/complete)
|
||||
|
||||
#### Configuration
|
||||
- YAML configuration with environment variable overrides
|
||||
- Multi-schema support
|
||||
- Docker Compose deployment ready
|
||||
|
||||
### Partially Implemented
|
||||
|
||||
| Feature | Status | Notes |
|
||||
|---------|--------|-------|
|
||||
| Odoo ERP integration | Partial | Config and sync-log CRUD functional; push/pull sync operations are stubs |
|
||||
| Date segment type | Not started | Schema parser placeholder exists |
|
||||
| Part number validation | Not started | API accepts but doesn't validate format |
|
||||
| Location hierarchy CRUD | Schema only | Tables exist, no API endpoints |
|
||||
| Inventory tracking | Schema only | Tables exist, no API endpoints |
|
||||
| Unit tests | Partial | 11 Go test files across api, db, ods, partnum, schema packages |
|
||||
|
||||
---
|
||||
|
||||
## Appendix B: Phase 1 Detailed Tasks
|
||||
|
||||
### 1.1 MinIO Integration -- COMPLETE
|
||||
- [x] MinIO service configured in Docker Compose
|
||||
- [x] File upload via REST API
|
||||
- [x] File download via REST API (latest and by revision)
|
||||
- [x] SHA256 checksums on upload
|
||||
|
||||
### 1.2 Authentication & Authorization -- COMPLETE
|
||||
- [x] Local authentication (bcrypt)
|
||||
- [x] LDAP/FreeIPA authentication
|
||||
- [x] OIDC/Keycloak authentication
|
||||
- [x] Role-based access control (admin/editor/viewer)
|
||||
- [x] API token management (SHA-256 hashed)
|
||||
- [x] Session management (PostgreSQL-backed)
|
||||
- [x] CSRF protection (nosurf)
|
||||
- [x] Audit logging (database table)
|
||||
|
||||
### 1.3 Multi-level BOM & Export -- COMPLETE
|
||||
- [x] Recursive BOM expansion with configurable depth
|
||||
- [x] Where-used reverse lookup
|
||||
- [x] BOM CSV export/import with cycle detection
|
||||
- [x] BOM ODS export
|
||||
- [x] ODS item export/import/template
|
||||
|
||||
### 1.4 Unit Test Suite
|
||||
- [ ] Database connection and transaction tests
|
||||
- [ ] Item CRUD operation tests
|
||||
- [ ] Revision creation and retrieval tests
|
||||
- [ ] Part number generation tests
|
||||
- [ ] File upload/download tests
|
||||
- [ ] CSV import/export tests
|
||||
- [ ] API endpoint tests
|
||||
|
||||
### 1.5 Missing Segment Types
|
||||
- [ ] Implement date segment type
|
||||
- [ ] Add strftime-style format support
|
||||
|
||||
### 1.6 Location & Inventory APIs
|
||||
- [ ] `GET /api/locations` - List locations
|
||||
- [ ] `POST /api/locations` - Create location
|
||||
- [ ] `GET /api/locations/{path}` - Get location
|
||||
- [ ] `DELETE /api/locations/{path}` - Delete location
|
||||
- [ ] `GET /api/inventory/{partNumber}` - Get inventory
|
||||
- [ ] `POST /api/inventory/{partNumber}/adjust` - Adjust quantity
|
||||
- [ ] `POST /api/inventory/{partNumber}/move` - Move between locations
|
||||
|
||||
---
|
||||
|
||||
## Appendix C: References
|
||||
|
||||
### SOLIDWORKS PDM Documentation
|
||||
- [SOLIDWORKS PDM Product Page](https://www.solidworks.com/product/solidworks-pdm)
|
||||
- [What's New in SOLIDWORKS PDM 2025](https://blogs.solidworks.com/solidworksblog/2024/10/whats-new-in-solidworks-pdm-2025.html)
|
||||
- [Top 5 Enhancements in SOLIDWORKS PDM 2024](https://blogs.solidworks.com/solidworksblog/2023/10/top-5-enhancements-in-solidworks-pdm-2024.html)
|
||||
- [SOLIDWORKS PDM Workflow Transitions](https://help.solidworks.com/2023/english/EnterprisePDM/Admin/c_workflow_transition.htm)
|
||||
- [Ultimate Guide to SOLIDWORKS PDM Permissions](https://www.goengineer.com/blog/ultimate-guide-to-solidworks-pdm-permissions)
|
||||
- [Searching in SOLIDWORKS PDM](https://help.solidworks.com/2021/english/EnterprisePDM/fileexplorer/c_searches.htm)
|
||||
- [SOLIDWORKS PDM API Getting Started](https://3dswym.3dexperience.3ds.com/wiki/solidworks-news-info/getting-started-with-the-solidworks-pdm-api-solidpractices_gBCYaM75RgORBcpSO1m_Mw)
|
||||
|
||||
### Silo Documentation
|
||||
- [Specification](SPECIFICATION.md)
|
||||
- [Development Status](STATUS.md)
|
||||
- [Deployment Guide](DEPLOYMENT.md)
|
||||
- [Gap Analysis](GAP_ANALYSIS.md)
|
||||
@@ -37,7 +37,7 @@ Silo treats **part numbering schemas as configuration, not code**. Multiple numb
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Silo Server (silod) │
|
||||
│ - REST API (75 endpoints) │
|
||||
│ - REST API (78 endpoints) │
|
||||
│ - Authentication (local, LDAP, OIDC) │
|
||||
│ - Schema parsing and validation │
|
||||
│ - Part number generation engine │
|
||||
@@ -50,7 +50,7 @@ Silo treats **part numbering schemas as configuration, not code**. Multiple numb
|
||||
▼ ▼
|
||||
┌─────────────────────────┐ ┌─────────────────────────────┐
|
||||
│ PostgreSQL │ │ MinIO │
|
||||
│ (psql.kindred.internal)│ │ - File storage │
|
||||
│ (psql.example.internal)│ │ - File storage │
|
||||
│ - Item metadata │ │ - Versioned objects │
|
||||
│ - Relationships │ │ - Thumbnails │
|
||||
│ - Revision history │ │ │
|
||||
@@ -63,7 +63,7 @@ Silo treats **part numbering schemas as configuration, not code**. Multiple numb
|
||||
|
||||
| Component | Technology | Notes |
|
||||
|-----------|------------|-------|
|
||||
| Database | PostgreSQL 16 | Existing instance at psql.kindred.internal |
|
||||
| Database | PostgreSQL 16 | Existing instance at psql.example.internal |
|
||||
| File Storage | MinIO | S3-compatible, versioning enabled |
|
||||
| CLI & API Server | Go (1.24) | chi/v5 router, pgx/v5 driver, zerolog |
|
||||
| Authentication | Multi-backend | Local (bcrypt), LDAP/FreeIPA, OIDC/Keycloak |
|
||||
@@ -598,7 +598,7 @@ See [AUTH.md](AUTH.md) for full architecture details and [AUTH_USER_GUIDE.md](AU
|
||||
|
||||
## 11. API Design
|
||||
|
||||
### 11.1 REST Endpoints (75 Implemented)
|
||||
### 11.1 REST Endpoints (78 Implemented)
|
||||
|
||||
```
|
||||
# Health (no auth)
|
||||
@@ -615,6 +615,9 @@ GET /auth/callback # OIDC callback
|
||||
# Public API (no auth required)
|
||||
GET /api/auth/config # Auth backend configuration (for login UI)
|
||||
|
||||
# Server-Sent Events (require auth)
|
||||
GET /api/events # SSE stream for real-time updates
|
||||
|
||||
# Auth API (require auth)
|
||||
GET /api/auth/me # Current authenticated user
|
||||
GET /api/auth/tokens # List user's API tokens
|
||||
@@ -627,7 +630,7 @@ POST /api/uploads/presign # Get presigned MinI
|
||||
# Schemas (read: viewer, write: editor)
|
||||
GET /api/schemas # List all schemas
|
||||
GET /api/schemas/{name} # Get schema details
|
||||
GET /api/schemas/{name}/properties # Get property schema for category
|
||||
GET /api/schemas/{name}/form # Get form descriptor (field groups, widgets, category picker)
|
||||
POST /api/schemas/{name}/segments/{segment}/values # Add enum value [editor]
|
||||
PUT /api/schemas/{name}/segments/{segment}/values/{code} # Update enum value [editor]
|
||||
DELETE /api/schemas/{name}/segments/{segment}/values/{code} # Delete enum value [editor]
|
||||
@@ -644,6 +647,7 @@ DELETE /api/projects/{code} # Delete project [ed
|
||||
# Items (read: viewer, write: editor)
|
||||
GET /api/items # List/filter items
|
||||
GET /api/items/search # Fuzzy search
|
||||
GET /api/items/by-uuid/{uuid} # Get item by UUID
|
||||
GET /api/items/export.csv # Export items to CSV
|
||||
GET /api/items/template.csv # CSV import template
|
||||
GET /api/items/export.ods # Export items to ODS
|
||||
@@ -689,6 +693,7 @@ GET /api/items/{partNumber}/bom/export.csv # Export BOM as CSV
|
||||
GET /api/items/{partNumber}/bom/export.ods # Export BOM as ODS
|
||||
POST /api/items/{partNumber}/bom # Add BOM entry [editor]
|
||||
POST /api/items/{partNumber}/bom/import # Import BOM from CSV [editor]
|
||||
POST /api/items/{partNumber}/bom/merge # Merge BOM from ODS with conflict resolution [editor]
|
||||
PUT /api/items/{partNumber}/bom/{childPartNumber} # Update BOM entry [editor]
|
||||
DELETE /api/items/{partNumber}/bom/{childPartNumber} # Remove BOM entry [editor]
|
||||
|
||||
@@ -734,11 +739,11 @@ POST /api/inventory/{partNumber}/move
|
||||
|
||||
### 12.1 Implemented
|
||||
|
||||
- [x] PostgreSQL database schema (11 migrations)
|
||||
- [x] PostgreSQL database schema (13 migrations)
|
||||
- [x] YAML schema parser for part numbering
|
||||
- [x] Part number generation engine
|
||||
- [x] CLI tool (`cmd/silo`)
|
||||
- [x] API server (`cmd/silod`) with 75 endpoints
|
||||
- [x] API server (`cmd/silod`) with 78 endpoints
|
||||
- [x] MinIO integration for file storage with versioning
|
||||
- [x] BOM relationships (component, alternate, reference)
|
||||
- [x] Multi-level BOM (recursive expansion with configurable depth)
|
||||
|
||||
@@ -10,10 +10,10 @@
|
||||
|
||||
| Component | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| PostgreSQL schema | Complete | 11 migrations applied |
|
||||
| PostgreSQL schema | Complete | 13 migrations applied |
|
||||
| YAML schema parser | Complete | Supports enum, serial, constant, string segments |
|
||||
| Part number generator | Complete | Scoped sequences, category-based format |
|
||||
| API server (`silod`) | Complete | 75 REST endpoints via chi/v5 |
|
||||
| API server (`silod`) | Complete | 78 REST endpoints via chi/v5 |
|
||||
| CLI tool (`silo`) | Complete | Item registration and management |
|
||||
| MinIO file storage | Complete | Upload, download, versioning, checksums |
|
||||
| Revision control | Complete | Append-only history, rollback, comparison, status/labels |
|
||||
@@ -55,7 +55,7 @@ FreeCAD workbench and LibreOffice Calc extension are maintained in separate repo
|
||||
|
||||
| Service | Host | Status |
|
||||
|---------|------|--------|
|
||||
| PostgreSQL | psql.kindred.internal:5432 | Running |
|
||||
| PostgreSQL | psql.example.internal:5432 | Running |
|
||||
| MinIO | localhost:9000 (API) / :9001 (console) | Configured |
|
||||
| Silo API | localhost:8080 | Builds successfully |
|
||||
|
||||
@@ -92,5 +92,7 @@ The schema defines 170 category codes across 10 groups:
|
||||
| 007_revision_status.sql | Revision status and labels |
|
||||
| 008_odoo_integration.sql | Odoo ERP integration tables (integrations, sync_log) |
|
||||
| 009_auth.sql | Authentication system (users, api_tokens, sessions, audit_log, user tracking columns) |
|
||||
| 010_item_extended_fields.sql | Extended item fields (sourcing_type, sourcing_link, standard_cost, long_description) |
|
||||
| 010_item_extended_fields.sql | Extended item fields (sourcing_type, long_description) |
|
||||
| 011_item_files.sql | Item file attachments (item_files table, thumbnail_key column) |
|
||||
| 012_bom_source.sql | BOM entry source tracking |
|
||||
| 013_move_cost_sourcing_to_props.sql | Move sourcing_link and standard_cost from item columns to revision properties |
|
||||
|
||||
515
docs/STYLE.md
Normal file
515
docs/STYLE.md
Normal file
@@ -0,0 +1,515 @@
|
||||
# Silo Style Guide
|
||||
|
||||
> Living reference for the Silo web UI. All modules must follow these conventions to maintain visual consistency across the platform.
|
||||
|
||||
---
|
||||
|
||||
## Color System
|
||||
|
||||
Silo uses the [Catppuccin Mocha](https://github.com/catppuccin/catppuccin) palette exclusively. All colors are referenced via CSS custom properties defined at `:root`.
|
||||
|
||||
### Palette
|
||||
|
||||
```
|
||||
--ctp-rosewater: #f5e0dc
|
||||
--ctp-flamingo: #f2cdcd
|
||||
--ctp-pink: #f5c2e7
|
||||
--ctp-mauve: #cba6f7
|
||||
--ctp-red: #f38ba8
|
||||
--ctp-maroon: #eba0ac
|
||||
--ctp-peach: #fab387
|
||||
--ctp-yellow: #f9e2af
|
||||
--ctp-green: #a6e3a1
|
||||
--ctp-teal: #94e2d5
|
||||
--ctp-sky: #89dceb
|
||||
--ctp-sapphire: #74c7ec
|
||||
--ctp-blue: #89b4fa
|
||||
--ctp-lavender: #b4befe
|
||||
--ctp-text: #cdd6f4
|
||||
--ctp-subtext1: #bac2de
|
||||
--ctp-subtext0: #a6adc8
|
||||
--ctp-overlay2: #9399b2
|
||||
--ctp-overlay1: #7f849c
|
||||
--ctp-overlay0: #6c7086
|
||||
--ctp-surface2: #585b70
|
||||
--ctp-surface1: #45475a
|
||||
--ctp-surface0: #313244
|
||||
--ctp-base: #1e1e2e
|
||||
--ctp-mantle: #181825
|
||||
--ctp-crust: #11111b
|
||||
```
|
||||
|
||||
### Semantic Roles
|
||||
|
||||
| Role | Token | Usage |
|
||||
|------|-------|-------|
|
||||
| Page background | `--ctp-base` | Main content area |
|
||||
| Panel background | `--ctp-mantle` | Sidebars, detail panes, headers |
|
||||
| Inset/input background | `--ctp-crust` | Form inputs, code blocks, drop zones |
|
||||
| Primary accent | `--ctp-mauve` | Primary buttons, active states, links, selection highlights |
|
||||
| Secondary accent | `--ctp-blue` | Informational highlights, secondary actions |
|
||||
| Success | `--ctp-green` | Confirmations, positive status |
|
||||
| Warning | `--ctp-yellow` | Caution states, pending actions |
|
||||
| Danger | `--ctp-red` | Destructive actions, errors, required indicators |
|
||||
| Informational | `--ctp-teal` | Auto-generated metadata, system-assigned values |
|
||||
| Body text | `--ctp-text` | Primary content |
|
||||
| Secondary text | `--ctp-subtext1` | Descriptions, timestamps |
|
||||
| Muted text | `--ctp-overlay1` | Placeholders, disabled states |
|
||||
| Borders | `--ctp-surface0` | Dividers, panel edges |
|
||||
| Hover borders | `--ctp-surface1` | Interactive element borders, row separators |
|
||||
| Focus ring | `rgba(203, 166, 247, 0.25)` | `box-shadow` on focused inputs (mauve at 25%) |
|
||||
|
||||
### Accent Usage for Data Types
|
||||
|
||||
| Data type | Color | Token |
|
||||
|-----------|-------|-------|
|
||||
| Assembly | `--ctp-mauve` | Badge, icon tint |
|
||||
| Part | `--ctp-green` | Badge, icon tint |
|
||||
| Document | `--ctp-blue` | Badge, icon tint |
|
||||
| Purchased | `--ctp-peach` | Badge, icon tint |
|
||||
| Phantom | `--ctp-overlay1` | Badge, icon tint |
|
||||
|
||||
These mappings are used anywhere item types appear: list badges, detail pane headers, BOM entries, tree views.
|
||||
|
||||
---
|
||||
|
||||
## Typography
|
||||
|
||||
### Scale
|
||||
|
||||
| Role | Size | Weight | Token/Color | Transform |
|
||||
|------|------|--------|-------------|-----------|
|
||||
| Page title | 1.1rem | 600 | `--ctp-text` | None |
|
||||
| Section header | 11px | 600 | `--ctp-overlay0` | Uppercase, `letter-spacing: 0.06em` |
|
||||
| Form label | 11px | 600 | `--ctp-overlay1` | Uppercase, `letter-spacing: 0.05em` |
|
||||
| Body text | 13px | 400 | `--ctp-text` | None |
|
||||
| Table cell | 12px | 400 | `--ctp-text` | None |
|
||||
| Caption / metadata | 11px | 400 | `--ctp-subtext0` | None |
|
||||
| Badge text | 10px | 600 | Varies | Uppercase |
|
||||
| Breadcrumb segment | 13px | 500 | `--ctp-subtext1` | None |
|
||||
| Breadcrumb active | 13px | 600 | `--ctp-text` | None |
|
||||
|
||||
### Font Stack
|
||||
|
||||
```css
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif;
|
||||
```
|
||||
|
||||
No external font dependencies. System fonts ensure fast rendering and native feel across platforms.
|
||||
|
||||
### Rules
|
||||
|
||||
- Never use font sizes below 10px.
|
||||
- Use `font-weight: 600` for emphasis instead of bold (700). Reserve 700 for page titles only when extra weight is needed.
|
||||
- `text-transform: uppercase` is reserved for section headers, form labels, and badges. Never uppercase body text or descriptions.
|
||||
|
||||
---
|
||||
|
||||
## Spacing
|
||||
|
||||
Base unit: **4px**. All spacing values are multiples of 4.
|
||||
|
||||
| Token | Value | Usage |
|
||||
|-------|-------|-------|
|
||||
| `xs` | 4px (0.25rem) | Tight gaps: icon-to-label, tag internal padding |
|
||||
| `sm` | 8px (0.5rem) | Compact spacing: between related fields, badge padding |
|
||||
| `md` | 12px (0.75rem) | Standard: form group gaps, sidebar section padding |
|
||||
| `lg` | 16px (1rem) | Section separation, card padding |
|
||||
| `xl` | 24px (1.5rem) | Page-level padding, major section breaks |
|
||||
| `2xl` | 32px (2rem) | Page horizontal padding |
|
||||
|
||||
### Application
|
||||
|
||||
- **Page padding:** `1.5rem 2rem` (24px vertical, 32px horizontal)
|
||||
- **Sidebar section padding:** `1rem 1.25rem`
|
||||
- **Form grid gap:** `1.25rem 1.5rem` (row gap × column gap)
|
||||
- **Table row height:** 36px minimum (padding included)
|
||||
- **Table cell padding:** `0.4rem 0.75rem`
|
||||
|
||||
---
|
||||
|
||||
## Layout
|
||||
|
||||
### Page Structure
|
||||
|
||||
Every module page follows the same shell:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ Top Nav (52px) │
|
||||
├──────────┬──────────────────────────────────────┤
|
||||
│ App Menu │ Page Header (58px) │
|
||||
│ (icons) ├──────────────────────┬───────────────┤
|
||||
│ │ Content Area │ Detail Pane │
|
||||
│ │ │ (360px) │
|
||||
│ │ │ │
|
||||
│ │ │ │
|
||||
└──────────┴──────────────────────┴───────────────┘
|
||||
```
|
||||
|
||||
- **Top nav:** `52px` height, `--ctp-mantle` background, `1px solid --ctp-surface0` bottom border.
|
||||
- **App menu sidebar:** Icon strip on the left. Module icons, tooltips on hover. Active module highlighted with `--ctp-mauve` indicator.
|
||||
- **Page header:** `58px` height, `--ctp-mantle` background. Contains page title (with module icon), action buttons right-aligned.
|
||||
- **Content area:** `--ctp-base` background. Scrollable. Contains list views, kanban boards, or other primary content.
|
||||
- **Detail pane:** `360px` fixed width, `--ctp-mantle` background, `1px solid --ctp-surface0` left border. Appears on record selection.
|
||||
|
||||
### Grid Patterns
|
||||
|
||||
**Two-column form:**
|
||||
```css
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
gap: 1.25rem 1.5rem;
|
||||
max-width: 800px;
|
||||
```
|
||||
|
||||
**List + detail:**
|
||||
```css
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 360px;
|
||||
min-height: calc(100vh - 52px - 58px);
|
||||
```
|
||||
|
||||
### Breakpoints
|
||||
|
||||
Not currently required. Silo targets desktop browsers on engineering workstations. If mobile support is added later, breakpoints will be defined at `768px` and `1024px`.
|
||||
|
||||
---
|
||||
|
||||
## Components
|
||||
|
||||
### Buttons
|
||||
|
||||
Four tiers. All buttons share a base style:
|
||||
|
||||
```css
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.35rem;
|
||||
padding: 0.4rem 0.85rem;
|
||||
border-radius: 6px;
|
||||
font-size: 12px;
|
||||
font-weight: 500;
|
||||
cursor: pointer;
|
||||
transition: all 0.15s;
|
||||
```
|
||||
|
||||
| Tier | Name | Background | Border | Text | Hover |
|
||||
|------|------|-----------|--------|------|-------|
|
||||
| Primary | `.btn-primary` | `--ctp-mauve` | `--ctp-mauve` | `--ctp-crust` | `--ctp-lavender` bg + border |
|
||||
| Secondary | `.btn` (default) | `--ctp-surface0` | `--ctp-surface1` | `--ctp-text` | `--ctp-surface1` bg, `--ctp-overlay0` border |
|
||||
| Ghost | `.btn-ghost` | transparent | transparent | `--ctp-subtext0` | `--ctp-surface0` bg, `--ctp-text` text |
|
||||
| Danger | `.btn-danger` | transparent | `--ctp-surface1` | `--ctp-red` | `rgba(243, 139, 168, 0.1)` bg, `--ctp-red` border |
|
||||
|
||||
Primary is used once per visible context (the main action). All other actions use secondary or ghost. Danger is only for destructive actions and always requires confirmation.
|
||||
|
||||
### Badges
|
||||
|
||||
Used for type indicators, status labels, and tags.
|
||||
|
||||
```css
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
padding: 0.15rem 0.5rem;
|
||||
border-radius: 4px;
|
||||
font-size: 10px;
|
||||
font-weight: 600;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.03em;
|
||||
```
|
||||
|
||||
Badges use a translucent background derived from their accent color:
|
||||
|
||||
```css
|
||||
/* Example: assembly badge */
|
||||
background: rgba(203, 166, 247, 0.15); /* --ctp-mauve at 15% */
|
||||
color: var(--ctp-mauve);
|
||||
```
|
||||
|
||||
Standard badge colors follow the [accent usage table](#accent-usage-for-data-types). Status badges:
|
||||
|
||||
| Status | Color |
|
||||
|--------|-------|
|
||||
| Active / Released | `--ctp-green` |
|
||||
| Draft / In Progress | `--ctp-blue` |
|
||||
| Review / Pending | `--ctp-yellow` |
|
||||
| Obsolete / Rejected | `--ctp-red` |
|
||||
| Locked | `--ctp-overlay1` |
|
||||
|
||||
### Form Inputs
|
||||
|
||||
All inputs share a base style:
|
||||
|
||||
```css
|
||||
background: var(--ctp-crust);
|
||||
border: 1px solid var(--ctp-surface1);
|
||||
border-radius: 6px;
|
||||
padding: 0.45rem 0.65rem;
|
||||
font-size: 12px;
|
||||
color: var(--ctp-text);
|
||||
transition: border-color 0.15s;
|
||||
```
|
||||
|
||||
| State | Border | Shadow |
|
||||
|-------|--------|--------|
|
||||
| Default | `--ctp-surface1` | None |
|
||||
| Hover | `--ctp-overlay0` | None |
|
||||
| Focus | `--ctp-mauve` | `0 0 0 0.2rem rgba(203, 166, 247, 0.25)` |
|
||||
| Error | `--ctp-red` | `0 0 0 0.2rem rgba(243, 139, 168, 0.15)` |
|
||||
| Disabled | `--ctp-surface0` | None, `opacity: 0.5` |
|
||||
|
||||
Placeholder text: `--ctp-overlay0`. Labels sit above inputs (never inline or floating).
|
||||
|
||||
### Tag Input
|
||||
|
||||
Used for multi-value fields (projects, tags):
|
||||
|
||||
```css
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 0.3rem;
|
||||
padding: 0.35rem 0.5rem;
|
||||
background: var(--ctp-crust);
|
||||
border: 1px solid var(--ctp-surface1);
|
||||
border-radius: 6px;
|
||||
min-height: 36px;
|
||||
```
|
||||
|
||||
Individual tags use the badge pattern: `rgba(accent, 0.15)` background with accent-colored text. The remove button (×) renders at `opacity: 0.6`, rising to `1.0` on hover.
|
||||
|
||||
### Tables
|
||||
|
||||
```css
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
font-size: 12px;
|
||||
```
|
||||
|
||||
| Element | Style |
|
||||
|---------|-------|
|
||||
| Header row | `background: --ctp-mantle`, `font-size: 11px`, uppercase, `--ctp-overlay1` text |
|
||||
| Body row | `border-bottom: 1px solid --ctp-surface0` |
|
||||
| Row hover | `background: --ctp-surface0` |
|
||||
| Row selected | `background: rgba(203, 166, 247, 0.08)` |
|
||||
| Cell padding | `0.4rem 0.75rem` |
|
||||
| Text columns | Left-aligned |
|
||||
| Number columns | Right-aligned |
|
||||
| Date columns | Right-aligned |
|
||||
| Action columns | Center-aligned |
|
||||
|
||||
Row actions use icon buttons (not text links). Icons at 14px, `--ctp-overlay1` default, `--ctp-text` on hover.
|
||||
|
||||
### Tabs
|
||||
|
||||
Used in detail panes and module sub-views:
|
||||
|
||||
```css
|
||||
display: flex;
|
||||
gap: 0;
|
||||
border-bottom: 2px solid var(--ctp-surface0);
|
||||
```
|
||||
|
||||
| State | Style |
|
||||
|-------|-------|
|
||||
| Default | `padding: 0.5rem 1rem`, `--ctp-subtext0` text, no border |
|
||||
| Hover | `--ctp-text` text |
|
||||
| Active | `--ctp-text` text, `font-weight: 600`, `border-bottom: 2px solid --ctp-mauve` (overlaps container border) |
|
||||
|
||||
### Section Dividers
|
||||
|
||||
Used to visually group form fields:
|
||||
|
||||
```css
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.75rem;
|
||||
grid-column: 1 / -1; /* span full form grid */
|
||||
margin-top: 0.75rem;
|
||||
```
|
||||
|
||||
Contains a label (`11px`, uppercase, `--ctp-overlay0`) and a horizontal line (`flex: 1`, `1px solid --ctp-surface0`).
|
||||
|
||||
### Sidebar Sections
|
||||
|
||||
Stacked vertically within detail panes:
|
||||
|
||||
```css
|
||||
padding: 1rem 1.25rem;
|
||||
border-bottom: 1px solid var(--ctp-surface0);
|
||||
```
|
||||
|
||||
Last section has no bottom border. Section titles follow the section header typography (11px, uppercase, `--ctp-overlay0`).
|
||||
|
||||
### Tooltips
|
||||
|
||||
Appear on hover after a 300ms delay. Positioned above the target element by default, flipping below when there is insufficient space above.
|
||||
|
||||
```css
|
||||
background: var(--ctp-surface0);
|
||||
border: 1px solid var(--ctp-surface1);
|
||||
border-radius: 4px;
|
||||
padding: 0.3rem 0.6rem;
|
||||
font-size: 11px;
|
||||
color: var(--ctp-text);
|
||||
box-shadow: 0 4px 12px rgba(17, 17, 27, 0.4);
|
||||
```
|
||||
|
||||
### Breadcrumbs
|
||||
|
||||
Module navigation breadcrumbs:
|
||||
|
||||
```
|
||||
Module Name > List View > Record Name > Sub-view
|
||||
```
|
||||
|
||||
Separator: `>` character in `--ctp-overlay0`. Segments are clickable links in `--ctp-subtext1`. Active (final) segment is `--ctp-text` at `font-weight: 600`.
|
||||
|
||||
### Dropdowns / Selects
|
||||
|
||||
Follow the input base style. The dropdown menu:
|
||||
|
||||
```css
|
||||
background: var(--ctp-surface0);
|
||||
border: 1px solid var(--ctp-surface1);
|
||||
border-radius: 6px;
|
||||
box-shadow: 0 8px 24px rgba(17, 17, 27, 0.5);
|
||||
padding: 0.25rem;
|
||||
max-height: 240px;
|
||||
overflow-y: auto;
|
||||
```
|
||||
|
||||
Menu items:
|
||||
|
||||
```css
|
||||
padding: 0.4rem 0.65rem;
|
||||
border-radius: 4px;
|
||||
font-size: 12px;
|
||||
color: var(--ctp-text);
|
||||
cursor: pointer;
|
||||
```
|
||||
|
||||
Hover: `background: --ctp-surface1`. Selected: `background: rgba(203, 166, 247, 0.12)`, `color: --ctp-mauve`, `font-weight: 600`.
|
||||
|
||||
---
|
||||
|
||||
## Icons
|
||||
|
||||
Use [Lucide](https://lucide.dev) icons. Size: 14px for inline/table contexts, 16px for buttons and navigation, 20px for page headers and empty states.
|
||||
|
||||
Stroke width: 1.5px (Lucide default). Color inherits from parent text color unless explicitly set.
|
||||
|
||||
Do not mix icon libraries. If Lucide does not have a suitable icon, request one be added or create a custom SVG following Lucide's 24×24 grid and stroke conventions.
|
||||
|
||||
---
|
||||
|
||||
## Transitions & Animation
|
||||
|
||||
All interactive state changes use `transition: all 0.15s ease`. This applies to hover, focus, active, and open/close states.
|
||||
|
||||
No entrance animations on page load. Content renders immediately. Skeleton loaders are acceptable for async data using a pulsing `--ctp-surface0` → `--ctp-surface1` gradient.
|
||||
|
||||
Dropdown menus and tooltips appear instantly (no slide/fade). Collapse/expand panels (if used) transition `max-height` at `0.2s ease`.
|
||||
|
||||
---
|
||||
|
||||
## Styling Implementation
|
||||
|
||||
Silo's React frontend uses **inline `React.CSSProperties` objects** with `var(--ctp-*)` token references. This is the project convention and must not be changed.
|
||||
|
||||
### Rules
|
||||
|
||||
- No CSS modules, no Tailwind, no external CSS-in-JS libraries.
|
||||
- Styles are defined as `const` objects at the top of each component file.
|
||||
- Shared style patterns (button base, input base) can be extracted to a `styles/` directory as exported `CSSProperties` objects.
|
||||
- Use `as const` or `as React.CSSProperties` for type safety.
|
||||
- Pseudo-classes (`:hover`, `:focus`) require state-driven inline styles or a thin CSS file for the base pseudo-class rules.
|
||||
|
||||
### Example
|
||||
|
||||
```typescript
|
||||
const styles = {
|
||||
container: {
|
||||
display: 'grid',
|
||||
gridTemplateColumns: '1fr 360px',
|
||||
height: '100%',
|
||||
overflow: 'hidden',
|
||||
} as React.CSSProperties,
|
||||
|
||||
sidebar: {
|
||||
background: 'var(--ctp-mantle)',
|
||||
borderLeft: '1px solid var(--ctp-surface0)',
|
||||
display: 'flex',
|
||||
flexDirection: 'column' as const,
|
||||
overflowY: 'auto' as const,
|
||||
} as React.CSSProperties,
|
||||
};
|
||||
```
|
||||
|
||||
### Pseudo-class CSS
|
||||
|
||||
A single `silo-base.css` file provides pseudo-class rules that cannot be expressed inline:
|
||||
|
||||
```css
|
||||
/* Hover, focus, and active states for core interactive elements */
|
||||
.silo-input:hover { border-color: var(--ctp-overlay0); }
|
||||
.silo-input:focus { border-color: var(--ctp-mauve); box-shadow: 0 0 0 0.2rem rgba(203, 166, 247, 0.25); }
|
||||
.silo-btn:hover { /* per-tier overrides */ }
|
||||
.silo-row:hover { background: var(--ctp-surface0); }
|
||||
```
|
||||
|
||||
Components apply the corresponding class names alongside their inline styles. This is the only place class-based styling is used.
|
||||
|
||||
---
|
||||
|
||||
## Do / Don't
|
||||
|
||||
| Do | Don't |
|
||||
|----|-------|
|
||||
| Use `var(--ctp-*)` for every color | Hardcode hex values |
|
||||
| Use the 4px spacing scale | Use arbitrary padding/margins |
|
||||
| Use Lucide icons at standard sizes | Mix icon libraries |
|
||||
| Use inline `CSSProperties` | Use CSS modules or Tailwind |
|
||||
| One primary button per visible context | Multiple competing primary buttons |
|
||||
| Use translucent accent backgrounds for badges | Use solid bright backgrounds for badges |
|
||||
| Use icon buttons for row-level table actions | Use text links in table rows |
|
||||
| Define styles as `const` objects at the top of the file | Write style object literals directly inside JSX elements |
|
||||
| Show tooltips on icon-only buttons | Leave icon buttons unlabeled |
|
||||
| Use section dividers to group form fields | Use cards or borders around field groups |
|
||||
| Follow the breadcrumb pattern for navigation | Use nested tab bars |
|
||||
|
||||
---
|
||||
|
||||
## Appendix: CSS Custom Properties Block
|
||||
|
||||
Paste this at the root of the application stylesheet:
|
||||
|
||||
```css
|
||||
:root {
|
||||
--ctp-rosewater: #f5e0dc;
|
||||
--ctp-flamingo: #f2cdcd;
|
||||
--ctp-pink: #f5c2e7;
|
||||
--ctp-mauve: #cba6f7;
|
||||
--ctp-red: #f38ba8;
|
||||
--ctp-maroon: #eba0ac;
|
||||
--ctp-peach: #fab387;
|
||||
--ctp-yellow: #f9e2af;
|
||||
--ctp-green: #a6e3a1;
|
||||
--ctp-teal: #94e2d5;
|
||||
--ctp-sky: #89dceb;
|
||||
--ctp-sapphire: #74c7ec;
|
||||
--ctp-blue: #89b4fa;
|
||||
--ctp-lavender: #b4befe;
|
||||
--ctp-text: #cdd6f4;
|
||||
--ctp-subtext1: #bac2de;
|
||||
--ctp-subtext0: #a6adc8;
|
||||
--ctp-overlay2: #9399b2;
|
||||
--ctp-overlay1: #7f849c;
|
||||
--ctp-overlay0: #6c7086;
|
||||
--ctp-surface2: #585b70;
|
||||
--ctp-surface1: #45475a;
|
||||
--ctp-surface0: #313244;
|
||||
--ctp-base: #1e1e2e;
|
||||
--ctp-mantle: #181825;
|
||||
--ctp-crust: #11111b;
|
||||
}
|
||||
```
|
||||
364
docs/WORKERS.md
Normal file
364
docs/WORKERS.md
Normal file
@@ -0,0 +1,364 @@
|
||||
# Worker System Specification
|
||||
|
||||
**Status:** Draft
|
||||
**Last Updated:** 2026-02-13
|
||||
|
||||
---
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
The worker system provides async compute job execution for Silo. Jobs are defined as YAML files, managed by the Silo server, and executed by external runner processes. The system is general-purpose -- while DAG validation is the first use case, it supports any compute workload: geometry export, thumbnail rendering, FEA/CFD batch jobs, report generation, and data migration.
|
||||
|
||||
---
|
||||
|
||||
## 2. Architecture
|
||||
|
||||
```
|
||||
YAML Job Definitions (files on disk, version-controllable)
|
||||
|
|
||||
v
|
||||
Silo Server (parser, scheduler, state machine, REST API, SSE events)
|
||||
|
|
||||
v
|
||||
Runners (silorunner binary, polls via REST, executes Headless Create)
|
||||
```
|
||||
|
||||
**Three layers:**
|
||||
|
||||
1. **Job definitions** -- YAML files in a configurable directory (default `/etc/silo/jobdefs`). Each file defines a job type: what triggers it, what it operates on, what computation to perform, and what runner capabilities are required. These are the source of truth and can be version-controlled alongside other Silo config.
|
||||
|
||||
2. **Silo server** -- Parses YAML definitions on startup and upserts them into the `job_definitions` table. Creates job instances when triggers fire (revision created, BOM changed, manual). Manages job lifecycle, enforces timeouts, and broadcasts status via SSE.
|
||||
|
||||
3. **Runners** -- Separate `silorunner` processes that authenticate with Silo via API tokens, poll for available jobs, claim them atomically, execute the compute, and report results. A runner host must have Headless Create and silo-mod installed for geometry jobs.
|
||||
|
||||
---
|
||||
|
||||
## 3. Job Definition Format
|
||||
|
||||
Job definitions are YAML files with the following structure:
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: assembly-validate
|
||||
version: 1
|
||||
description: "Validate assembly by rebuilding its dependency subgraph"
|
||||
|
||||
trigger:
|
||||
type: revision_created # revision_created, bom_changed, manual, schedule
|
||||
filter:
|
||||
item_type: assembly # only trigger for assemblies
|
||||
|
||||
scope:
|
||||
type: assembly # item, assembly, project
|
||||
|
||||
compute:
|
||||
type: validate # validate, rebuild, diff, export, custom
|
||||
command: create-validate # runner-side command identifier
|
||||
args: # passed to runner as JSON
|
||||
rebuild_mode: incremental
|
||||
check_interference: true
|
||||
|
||||
runner:
|
||||
tags: [create] # required runner capabilities
|
||||
|
||||
timeout: 900 # seconds before job is marked failed (default 600)
|
||||
max_retries: 2 # retry count on failure (default 1)
|
||||
priority: 50 # lower = higher priority (default 100)
|
||||
```
|
||||
|
||||
### 3.1 Trigger Types
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `revision_created` | Fires when a new revision is created on an item matching the filter |
|
||||
| `bom_changed` | Fires when a BOM merge completes |
|
||||
| `manual` | Only triggered via `POST /api/jobs` |
|
||||
| `schedule` | Future: cron-like scheduling (not yet implemented) |
|
||||
|
||||
### 3.2 Trigger Filters
|
||||
|
||||
The `filter` map supports key-value matching against item properties:
|
||||
|
||||
| Key | Description |
|
||||
|-----|-------------|
|
||||
| `item_type` | Match item type: `part`, `assembly`, `drawing`, etc. |
|
||||
| `schema` | Match schema name |
|
||||
|
||||
All filter keys must match for the trigger to fire. An empty filter matches all items.
|
||||
|
||||
### 3.3 Scope Types
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `item` | Job operates on a single item |
|
||||
| `assembly` | Job operates on an assembly and its BOM tree |
|
||||
| `project` | Job operates on all items in a project |
|
||||
|
||||
### 3.4 Compute Commands
|
||||
|
||||
The `command` field identifies what the runner should execute. Built-in commands:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `create-validate` | Open file in Headless Create, rebuild features, report validation results |
|
||||
| `create-export` | Open file, export to specified format (STEP, IGES, 3MF) |
|
||||
| `create-dag-extract` | Open file, extract feature DAG, output as JSON |
|
||||
| `create-thumbnail` | Open file, render thumbnail image |
|
||||
|
||||
Custom commands can be added by extending silo-mod's `silo.runner` module.
|
||||
|
||||
---
|
||||
|
||||
## 4. Job Lifecycle
|
||||
|
||||
```
|
||||
pending → claimed → running → completed
|
||||
→ failed
|
||||
→ cancelled
|
||||
```
|
||||
|
||||
| State | Description |
|
||||
|-------|-------------|
|
||||
| `pending` | Job created, waiting for a runner to claim it |
|
||||
| `claimed` | Runner has claimed the job. `expires_at` is set. |
|
||||
| `running` | Runner has started execution (reported via progress update) |
|
||||
| `completed` | Runner reported success. `result` JSONB contains output. |
|
||||
| `failed` | Runner reported failure, timeout expired, or max retries exceeded |
|
||||
| `cancelled` | Admin cancelled the job before completion |
|
||||
|
||||
### 4.1 Claim Semantics
|
||||
|
||||
Runners claim jobs via `POST /api/runner/claim`. The server uses PostgreSQL's `SELECT ... FOR UPDATE SKIP LOCKED` so that, even under concurrent polling, each pending job is atomically claimed by at most one runner:
|
||||
|
||||
```sql
|
||||
WITH claimable AS (
|
||||
SELECT id FROM jobs
|
||||
WHERE status = 'pending'
|
||||
AND runner_tags <@ $2::text[]
|
||||
ORDER BY priority ASC, created_at ASC
|
||||
LIMIT 1
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
UPDATE jobs SET
|
||||
status = 'claimed',
|
||||
runner_id = $1,
|
||||
claimed_at = now(),
|
||||
expires_at = now() + (timeout_seconds || ' seconds')::interval
|
||||
FROM claimable
|
||||
WHERE jobs.id = claimable.id
|
||||
RETURNING jobs.*;
|
||||
```
|
||||
|
||||
The `runner_tags <@ $2::text[]` condition ensures the runner has all tags required by the job. A runner with tags `["create", "linux", "gpu"]` can claim a job requiring `["create"]`, but not one requiring `["create", "windows"]`.
|
||||
|
||||
### 4.2 Timeout Enforcement
|
||||
|
||||
A background sweeper runs every 30 seconds (configurable via `jobs.job_timeout_check`) and marks expired jobs as failed:
|
||||
|
||||
```sql
|
||||
UPDATE jobs SET status = 'failed', error_message = 'job timed out'
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND expires_at < now();
|
||||
```
|
||||
|
||||
### 4.3 Retry
|
||||
|
||||
When a job fails and `retry_count < max_retries`, a new job is created with the same definition and scope, with `retry_count` incremented.
|
||||
|
||||
---
|
||||
|
||||
## 5. Runners
|
||||
|
||||
### 5.1 Registration
|
||||
|
||||
Runners are registered via `POST /api/runners` (admin only). The server generates a token (shown once) and stores the SHA-256 hash in the `runners` table. This follows the same pattern as API tokens in `internal/auth/token.go`.
|
||||
|
||||
### 5.2 Authentication
|
||||
|
||||
Runners authenticate via `Authorization: Bearer silo_runner_<token>`. A dedicated `RequireRunnerAuth` middleware validates the token against the `runners` table and injects a `RunnerIdentity` into the request context.
|
||||
|
||||
### 5.3 Heartbeat
|
||||
|
||||
Runners send `POST /api/runner/heartbeat` every 30 seconds. The server updates `last_heartbeat` and sets `status = 'online'`. A background sweeper marks runners as `offline` if their heartbeat is older than `runner_timeout` seconds (default 90).
|
||||
|
||||
### 5.4 Tags
|
||||
|
||||
Each runner declares capability tags (e.g., `["create", "linux", "gpu"]`). Jobs require specific tags via the `runner.tags` field in their YAML definition. A runner can only claim jobs whose required tags are a subset of the runner's tags.
|
||||
|
||||
### 5.5 Runner Config
|
||||
|
||||
The `silorunner` binary reads its config from a YAML file:
|
||||
|
||||
```yaml
|
||||
server_url: "https://silo.example.com"
|
||||
token: "silo_runner_abc123..."
|
||||
name: "worker-01"
|
||||
tags: ["create", "linux"]
|
||||
poll_interval: 5 # seconds between claim attempts
|
||||
create_path: "/usr/bin/create" # path to Headless Create binary (with silo-mod installed)
|
||||
```
|
||||
|
||||
Or via environment variables: `SILO_RUNNER_SERVER_URL`, `SILO_RUNNER_TOKEN`, etc.
|
||||
|
||||
### 5.6 Deployment
|
||||
|
||||
Runner prerequisites:
|
||||
- `silorunner` binary (built from `cmd/silorunner/`)
|
||||
- Headless Create (Kindred's fork of FreeCAD) with silo-mod workbench installed
|
||||
- Network access to Silo server API
|
||||
|
||||
Runners can be deployed as:
|
||||
- Bare metal processes alongside Create installations
|
||||
- Docker containers with Create pre-installed
|
||||
- Multiple instances registered under different names, for horizontal scaling
|
||||
|
||||
---
|
||||
|
||||
## 6. Job Log
|
||||
|
||||
Each job has an append-only log stored in the `job_log` table. Runners append entries via `POST /api/runner/jobs/{jobID}/log`:
|
||||
|
||||
```json
|
||||
{
|
||||
"level": "info",
|
||||
"message": "Rebuilding Pad003...",
|
||||
"metadata": {"node_key": "Pad003", "progress_pct": 45}
|
||||
}
|
||||
```
|
||||
|
||||
Log levels: `debug`, `info`, `warn`, `error`.
|
||||
|
||||
---
|
||||
|
||||
## 7. SSE Events
|
||||
|
||||
All job lifecycle transitions are broadcast via Silo's SSE broker. Clients subscribe to `/api/events` and receive:
|
||||
|
||||
| Event Type | Payload | When |
|
||||
|------------|---------|------|
|
||||
| `job.created` | `{id, definition_name, item_id, status, priority}` | Job created |
|
||||
| `job.claimed` | `{id, runner_id, runner_name}` | Runner claims job |
|
||||
| `job.progress` | `{id, progress, progress_message}` | Runner reports progress (0-100) |
|
||||
| `job.completed` | `{id, result_summary, duration_seconds}` | Job completed successfully |
|
||||
| `job.failed` | `{id, error_message}` | Job failed |
|
||||
| `job.cancelled` | `{id, cancelled_by}` | Admin cancelled job |
|
||||
| `runner.online` | `{id, name, tags}` | Runner heartbeat (first after offline) |
|
||||
| `runner.offline` | `{id, name}` | Runner heartbeat timeout |
|
||||
|
||||
---
|
||||
|
||||
## 8. REST API
|
||||
|
||||
### 8.1 Job Endpoints (user-facing, require auth)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/jobs` | viewer | List jobs (filterable by status, item, definition) |
|
||||
| `GET` | `/api/jobs/{jobID}` | viewer | Get job details |
|
||||
| `GET` | `/api/jobs/{jobID}/logs` | viewer | Get job log entries |
|
||||
| `POST` | `/api/jobs` | editor | Manually trigger a job |
|
||||
| `POST` | `/api/jobs/{jobID}/cancel` | editor | Cancel a pending/running job |
|
||||
|
||||
### 8.2 Job Definition Endpoints
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/job-definitions` | viewer | List loaded definitions |
|
||||
| `GET` | `/api/job-definitions/{name}` | viewer | Get specific definition |
|
||||
| `POST` | `/api/job-definitions/reload` | admin | Re-read YAML from disk |
|
||||
|
||||
### 8.3 Runner Management Endpoints (admin)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `GET` | `/api/runners` | admin | List registered runners |
|
||||
| `POST` | `/api/runners` | admin | Register runner (returns token) |
|
||||
| `DELETE` | `/api/runners/{runnerID}` | admin | Delete runner |
|
||||
|
||||
### 8.4 Runner-Facing Endpoints (runner token auth)
|
||||
|
||||
| Method | Path | Auth | Description |
|
||||
|--------|------|------|-------------|
|
||||
| `POST` | `/api/runner/heartbeat` | runner | Send heartbeat |
|
||||
| `POST` | `/api/runner/claim` | runner | Claim next available job |
|
||||
| `PUT` | `/api/runner/jobs/{jobID}/progress` | runner | Report progress |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/complete` | runner | Report completion with result |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/fail` | runner | Report failure |
|
||||
| `POST` | `/api/runner/jobs/{jobID}/log` | runner | Append log entry |
|
||||
| `PUT` | `/api/runner/jobs/{jobID}/dag` | runner | Sync DAG results after compute |
|
||||
|
||||
---
|
||||
|
||||
## 9. Configuration
|
||||
|
||||
Add to `config.yaml`:
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
directory: /etc/silo/jobdefs # path to YAML job definitions
|
||||
runner_timeout: 90 # seconds before marking runner offline
|
||||
job_timeout_check: 30 # seconds between timeout sweeps
|
||||
default_priority: 100 # default job priority
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. Example Job Definitions
|
||||
|
||||
### Assembly Validation
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: assembly-validate
|
||||
version: 1
|
||||
description: "Validate assembly by rebuilding its dependency subgraph"
|
||||
trigger:
|
||||
type: revision_created
|
||||
filter:
|
||||
item_type: assembly
|
||||
scope:
|
||||
type: assembly
|
||||
compute:
|
||||
type: validate
|
||||
command: create-validate
|
||||
args:
|
||||
rebuild_mode: incremental
|
||||
check_interference: true
|
||||
runner:
|
||||
tags: [create]
|
||||
timeout: 900
|
||||
max_retries: 2
|
||||
priority: 50
|
||||
```
|
||||
|
||||
### STEP Export
|
||||
|
||||
```yaml
|
||||
job:
|
||||
name: part-export-step
|
||||
version: 1
|
||||
description: "Export a part to STEP format"
|
||||
trigger:
|
||||
type: manual
|
||||
scope:
|
||||
type: item
|
||||
compute:
|
||||
type: export
|
||||
command: create-export
|
||||
args:
|
||||
format: step
|
||||
output_key_template: "exports/{part_number}_rev{revision}.step"
|
||||
runner:
|
||||
tags: [create]
|
||||
timeout: 300
|
||||
max_retries: 1
|
||||
priority: 100
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. References
|
||||
|
||||
- [DAG.md](DAG.md) -- Dependency DAG specification
|
||||
- [MULTI_USER_EDITS.md](MULTI_USER_EDITS.md) -- Multi-user editing specification
|
||||
- [ROADMAP.md](ROADMAP.md) -- Tier 0 Job Queue Infrastructure, Tier 1 Headless Create
|
||||
339
frontend-spec.md
339
frontend-spec.md
@@ -1,6 +1,6 @@
|
||||
# Silo Frontend Specification
|
||||
|
||||
Current as of 2026-02-08. Documents the React + Vite + TypeScript frontend (migration from Go templates is complete).
|
||||
Current as of 2026-02-11. Documents the React + Vite + TypeScript frontend (migration from Go templates is complete).
|
||||
|
||||
## Overview
|
||||
|
||||
@@ -68,6 +68,7 @@ web/
|
||||
│ └── AuthContext.tsx AuthProvider with login/logout/refresh methods
|
||||
├── hooks/
|
||||
│ ├── useAuth.ts Context consumer hook
|
||||
│ ├── useFormDescriptor.ts Fetches form descriptor from /api/schemas/{name}/form (replaces useCategories)
|
||||
│ ├── useItems.ts Items fetching with search, filters, pagination, debounce
|
||||
│ └── useLocalStorage.ts Typed localStorage persistence hook
|
||||
├── styles/
|
||||
@@ -271,63 +272,81 @@ Vite dev server runs on port 5173 with proxy config in `vite.config.ts` forwardi
|
||||
|
||||
## New Frontend Tasks
|
||||
|
||||
# CreateItemPane Redesign Specification
|
||||
# CreateItemPane — Schema-Driven Dynamic Form
|
||||
|
||||
**Date**: 2026-02-06
|
||||
**Scope**: Replace existing `CreateItemPane.tsx` with a two-column layout, multi-stage category picker, file attachment via MinIO, and full use of screen real estate.
|
||||
**Date**: 2026-02-10
|
||||
**Scope**: `CreateItemPane.tsx` renders a dynamic form driven entirely by the form descriptor API (`GET /api/schemas/{name}/form`). All field groups, field types, widgets, and category-specific fields are defined in YAML and resolved server-side.
|
||||
**Parent**: Items page (`ItemsPage.tsx`) — renders in the detail pane area per existing in-pane CRUD pattern.
|
||||
|
||||
---
|
||||
|
||||
## Layout
|
||||
|
||||
The pane uses a CSS Grid two-column layout instead of the current single-column form:
|
||||
Single-column scrollable form with a green header bar. Field groups are rendered dynamically from the form descriptor. Category-specific field groups appear after global groups when a category is selected.
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────┬──────────────┐
|
||||
│ Header: "New Item" [green bar] Cancel │ Create │ │
|
||||
├──────────────────────────────────────────────────────┤ │
|
||||
│ │ Auto- │
|
||||
│ ── Identity ────────────────────────────────────── │ assigned │
|
||||
│ [Part Number *] [Type * v] │ metadata │
|
||||
│ [Description ] │ │
|
||||
│ Category * [Domain │ Group │ Subtype ] │──────────────│
|
||||
│ Mechanical│ Structural│ Bracket │ │ │
|
||||
│ Electrical│ Bearings │ Plate │ │ Attachments │
|
||||
│ ... │ ... │ ... │ │ ┌─ ─ ─ ─ ┐ │
|
||||
│ ── Sourcing ────────────────────────────────────── │ │ Drop │ │
|
||||
│ [Sourcing Type v] [Standard Cost $ ] │ │ zone │ │
|
||||
│ [Unit of Measure v] [Sourcing Link ] │ └─ ─ ─ ─ ┘ │
|
||||
│ │ file.FCStd │
|
||||
│ ── Details ─────────────────────────────────────── │ drawing.pdf │
|
||||
│ [Long Description ] │ │
|
||||
│ [Projects: [tag][tag] type to search... ] │──────────────│
|
||||
│ │ Thumbnail │
|
||||
│ │ [preview] │
|
||||
└──────────────────────────────────────────────────────┴──────────────┘
|
||||
┌──────────────────────────────────────────────────────────────────────┐
|
||||
│ Header: "New Item" [green bar] Cancel │ Create │
|
||||
├──────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ Category * [Domain buttons: F C R S E M T A P X] │
|
||||
│ [Subcategory search + filtered list] │
|
||||
│ │
|
||||
│ ── Identity ────────────────────────────────────────────────────── │
|
||||
│ [Type * (auto-derived from category)] [Description ] │
|
||||
│ │
|
||||
│ ── Sourcing ────────────────────────────────────────────────────── │
|
||||
│ [Sourcing Type v] [Manufacturer] [MPN] [Supplier] [SPN] │
|
||||
│ [Sourcing Link] │
|
||||
│ │
|
||||
│ ── Cost & Lead Time ────────────────────────────────────────────── │
|
||||
│ [Standard Cost $] [Lead Time Days] [Min Order Qty] │
|
||||
│ │
|
||||
│ ── Status ──────────────────────────────────────────────────────── │
|
||||
│ [Lifecycle Status v] [RoHS Compliant ☐] [Country of Origin] │
|
||||
│ │
|
||||
│ ── Details ─────────────────────────────────────────────────────── │
|
||||
│ [Long Description ] │
|
||||
│ [Projects: [tag][tag] type to search... ] │
|
||||
│ [Notes ] │
|
||||
│ │
|
||||
│ ── Fastener Specifications (category-specific) ─────────────────── │
|
||||
│ [Material] [Finish] [Thread Size] [Head Type] [Drive Type] ... │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Grid definition: `grid-template-columns: 1fr 320px`. The left column scrolls independently if content overflows. The right sidebar is a flex column with sections separated by `--ctp-surface1` borders.
|
||||
## Data Source — Form Descriptor API
|
||||
|
||||
All form structure is fetched from `GET /api/schemas/kindred-rd/form`, which returns:
|
||||
|
||||
- `category_picker`: Multi-stage picker config (domain → subcategory)
|
||||
- `item_fields`: Definitions for item-level fields (description, item_type, sourcing_type, etc.)
|
||||
- `field_groups`: Ordered groups with resolved field metadata (Identity, Sourcing, Cost, Status, Details)
|
||||
- `category_field_groups`: Per-category-prefix groups (e.g., Fastener Specifications for `F` prefix)
|
||||
- `field_overrides`: Widget hints (currency, url, select, checkbox)
|
||||
|
||||
The YAML schema (`schemas/kindred-rd.yaml`) is the single source of truth. Adding a new field or category in YAML propagates to all clients with no code changes.
|
||||
|
||||
## File Location
|
||||
|
||||
`web/src/components/items/CreateItemPane.tsx` (replaces existing file)
|
||||
`web/src/components/items/CreateItemPane.tsx`
|
||||
|
||||
New supporting files:
|
||||
Supporting files:
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `web/src/components/items/CategoryPicker.tsx` | Multi-stage category selector |
|
||||
| `web/src/components/items/CategoryPicker.tsx` | Multi-stage domain/subcategory selector |
|
||||
| `web/src/components/items/FileDropZone.tsx` | Drag-and-drop file upload with MinIO presigned URLs |
|
||||
| `web/src/components/items/TagInput.tsx` | Multi-select tag input for projects |
|
||||
| `web/src/hooks/useCategories.ts` | Fetches category tree from schema data |
|
||||
| `web/src/hooks/useFormDescriptor.ts` | Fetches and caches form descriptor from `/api/schemas/{name}/form` |
|
||||
| `web/src/hooks/useFileUpload.ts` | Manages presigned URL upload flow |
|
||||
|
||||
## Component Breakdown
|
||||
|
||||
### CreateItemPane
|
||||
|
||||
Top-level orchestrator. Manages form state, submission, and layout.
|
||||
Top-level orchestrator. Renders dynamic form from the form descriptor.
|
||||
|
||||
**Props** (unchanged interface):
|
||||
|
||||
@@ -341,68 +360,64 @@ interface CreateItemPaneProps {
|
||||
**State**:
|
||||
|
||||
```typescript
|
||||
const [form, setForm] = useState<CreateItemForm>({
|
||||
part_number: '',
|
||||
item_type: 'part',
|
||||
description: '',
|
||||
category_path: [], // e.g. ['Mechanical', 'Structural', 'Bracket']
|
||||
sourcing_type: 'manufactured',
|
||||
standard_cost: '',
|
||||
unit_of_measure: 'ea',
|
||||
sourcing_link: '',
|
||||
long_description: '',
|
||||
project_ids: [],
|
||||
});
|
||||
const [attachments, setAttachments] = useState<PendingAttachment[]>([]);
|
||||
const [thumbnail, setThumbnail] = useState<PendingAttachment | null>(null);
|
||||
const { descriptor, categories, loading } = useFormDescriptor();
|
||||
const [category, setCategory] = useState(''); // selected category code, e.g. "F01"
|
||||
const [fields, setFields] = useState<Record<string, string>>({}); // all field values keyed by name
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [submitting, setSubmitting] = useState(false);
|
||||
```
|
||||
|
||||
A single `fields` record holds all form values (both item-level and property fields). The `ITEM_LEVEL_FIELDS` set (`description`, `item_type`, `sourcing_type`, `long_description`) determines which fields go into the top-level request vs. the `properties` map on submission.
|
||||
|
||||
**Auto-derivation**: When a category is selected, `item_type` is automatically set based on the `derived_from_category` mapping in the form descriptor (e.g., category prefix `A` → `assembly`, `T` → `tooling`, default → `part`).
|
||||
|
||||
**Dynamic rendering**: A `renderField()` function maps each field's `widget` type to the appropriate input:
|
||||
|
||||
| Widget | Rendered As |
|
||||
|--------|-------------|
|
||||
| `text` | `<input type="text">` |
|
||||
| `number` | `<input type="number">` |
|
||||
| `textarea` | `<textarea>` |
|
||||
| `select` | `<select>` with `<option>` elements from `field.options` |
|
||||
| `checkbox` | `<input type="checkbox">` |
|
||||
| `currency` | `<input type="number">` with currency prefix (e.g., "$") |
|
||||
| `url` | `<input type="url">` |
|
||||
| `tag_input` | `TagInput` component with search endpoint |
|
||||
|
||||
**Submission flow**:
|
||||
|
||||
1. Validate required fields (part_number, item_type, category_path length === 3).
|
||||
2. `POST /api/items` with form data → returns created `Item` with UUID.
|
||||
3. For each attachment in `attachments[]`, call the file association endpoint: `POST /api/items/{id}/files` with the MinIO object key returned from upload.
|
||||
4. If thumbnail exists, `PUT /api/items/{id}/thumbnail` with the object key.
|
||||
5. Call `onCreated(item)`.
|
||||
1. Validate required fields (category must be selected).
|
||||
2. Split `fields` into item-level fields and properties using `ITEM_LEVEL_FIELDS`.
|
||||
3. `POST /api/items` with `{ part_number: '', item_type, description, sourcing_type, long_description, category, properties: {...} }`.
|
||||
4. Call `onCreated(item)`.
|
||||
|
||||
If the `POST /api/items` request in step 3 fails, show an error banner. If file association fails, show a warning but still navigate (the item was created, so files can be re-attached).
|
||||
|
||||
**Header bar**: Green (`--ctp-green` background, `--ctp-crust` text) per existing create-pane convention. "New Item" title on left, Cancel (ghost button) and Create Item (primary button, `--ctp-green` bg) on right.
|
||||
**Header bar**: Green (`--ctp-green` background, `--ctp-crust` text). "New Item" title on left, Cancel and Create Item buttons on right.
|
||||
|
||||
### CategoryPicker
|
||||
|
||||
Three-column scrollable list for hierarchical category selection.
|
||||
Multi-stage category selector driven by the form descriptor's `category_picker.stages` config.
|
||||
|
||||
**Props**:
|
||||
|
||||
```typescript
|
||||
interface CategoryPickerProps {
|
||||
value: string[]; // current selection path, e.g. ['Mechanical', 'Structural']
|
||||
onChange: (path: string[]) => void;
|
||||
categories: CategoryNode[]; // top-level nodes
|
||||
}
|
||||
|
||||
interface CategoryNode {
|
||||
name: string;
|
||||
children?: CategoryNode[];
|
||||
value: string; // selected category code, e.g. "F01"
|
||||
onChange: (code: string) => void;
|
||||
categories: Record<string, string>; // flat code → description map
|
||||
stages?: CategoryPickerStage[]; // from form descriptor
|
||||
}
|
||||
```
|
||||
|
||||
**Rendering**: Three side-by-side `<div>` columns inside a container with `border: 1px solid var(--ctp-surface1)` and `border-radius: 0.4rem`. Each column has:
|
||||
**Rendering**: Two-stage selection:
|
||||
|
||||
- A sticky header row (10px uppercase, `--ctp-overlay0` text, `--ctp-mantle` background) labeling the tier. Labels come from the schema definition if available, otherwise "Level 1", "Level 2", "Level 3".
|
||||
- A scrollable list of options. Each option is a `<div>` row, 28px height, `0.85rem` font. Hover: `--ctp-surface0` background. Selected: translucent mauve background (`rgba(203, 166, 247, 0.12)`), `--ctp-mauve` text, weight 600.
|
||||
- If a node has children, show a `›` chevron on the right side of the row.
|
||||
1. **Domain row**: Horizontal row of buttons, one per domain from `stages[0].values` (F=Fasteners, C=Fluid Fittings, etc.). Selected domain has mauve highlight.
|
||||
2. **Subcategory list**: Filtered list of categories matching the selected domain prefix. Includes a search input for filtering. Each row shows code and description.
|
||||
|
||||
Column 1 always shows all top-level nodes. Column 2 shows children of the selected Column 1 node. Column 3 shows children of the selected Column 2 node. If nothing is selected in a column, the next column shows an empty state with muted text: "Select a [tier name]".
|
||||
If no `stages` prop is provided, falls back to a flat searchable list of all categories.
|
||||
|
||||
Below the picker, render a breadcrumb trail: `Mechanical › Structural › Bracket` in `--ctp-mauve` with `›` separators in `--ctp-overlay0`. Only show segments that are selected.
|
||||
Below the picker, the selected category is shown as a breadcrumb: `Fasteners › F01 — Hex Cap Screw` in `--ctp-mauve`.
|
||||
|
||||
**Data source**: Categories are derived from schemas. The `useCategories` hook calls `GET /api/schemas` and transforms the response into a `CategoryNode[]` tree. The exact mapping depends on how schemas define category hierarchies — if schemas don't currently support hierarchical categories, this requires a backend addition (see Backend Changes section).
|
||||
|
||||
**Max height**: 180px per column with `overflow-y: auto`.
|
||||
**Data source**: Categories come from `useFormDescriptor()` which derives them from the `category_picker` stages and `values_by_domain` in the form descriptor response.
|
||||
|
||||
### FileDropZone
|
||||
|
||||
@@ -478,17 +493,17 @@ The dropdown is an absolutely-positioned `<div>` below the input container, `--c
|
||||
|
||||
**For projects**: `searchFn` calls `GET /api/projects?q={query}` and maps to `{ id: project.id, label: project.code + ' — ' + project.name }`.
|
||||
|
||||
### useCategories Hook
|
||||
### useFormDescriptor Hook
|
||||
|
||||
```typescript
|
||||
function useCategories(): {
|
||||
categories: CategoryNode[];
|
||||
function useFormDescriptor(schemaName = "kindred-rd"): {
|
||||
descriptor: FormDescriptor | null;
|
||||
categories: Record<string, string>; // flat code → description map derived from descriptor
|
||||
loading: boolean;
|
||||
error: string | null;
|
||||
}
|
||||
```
|
||||
|
||||
Fetches `GET /api/schemas` on mount and transforms into a category tree. Caches in a module-level variable so repeated renders don't refetch. If the API doesn't currently support hierarchical categories, this returns a flat list as a single-tier picker until the backend is extended.
|
||||
Fetches `GET /api/schemas/{name}/form` on mount. Caches the result in a module-level variable so repeated renders/mounts don't refetch. Derives a flat `categories` map from the `category_picker` stages and `values_by_domain` in the response. Replaces the old `useCategories` hook (deleted).
|
||||
|
||||
### useFileUpload Hook
|
||||
|
||||
@@ -542,30 +557,32 @@ const styles = {
|
||||
|
||||
## Form Sections
|
||||
|
||||
The form is visually divided by section headers. Each header is a flex row containing a label (11px uppercase, `--ctp-overlay0`) and a `flex: 1` horizontal line (`1px solid --ctp-surface0`). Sections span `grid-column: 1 / -1`.
|
||||
Form sections are rendered dynamically from the `field_groups` array in the form descriptor. Each section header is a flex row containing a label (11px uppercase, `--ctp-overlay0`) and a `flex: 1` horizontal line (`1px solid --ctp-surface0`).
|
||||
|
||||
| Section | Fields |
|
||||
|---------|--------|
|
||||
| Identity | Part Number*, Type*, Description, Category* |
|
||||
| Sourcing | Sourcing Type, Standard Cost, Unit of Measure, Sourcing Link |
|
||||
| Details | Long Description, Projects |
|
||||
**Global field groups** (from `ui.field_groups` in YAML):
|
||||
|
||||
## Sidebar Sections
|
||||
| Group Key | Label | Fields |
|
||||
|-----------|-------|--------|
|
||||
| identity | Identity | item_type, description |
|
||||
| sourcing | Sourcing | sourcing_type, manufacturer, manufacturer_pn, supplier, supplier_pn, sourcing_link |
|
||||
| cost | Cost & Lead Time | standard_cost, lead_time_days, minimum_order_qty |
|
||||
| status | Status | lifecycle_status, rohs_compliant, country_of_origin |
|
||||
| details | Details | long_description, projects, notes |
|
||||
|
||||
The right sidebar is divided into three sections with `borderBottom: 1px solid var(--ctp-surface0)`:
|
||||
**Category-specific field groups** (from `ui.category_field_groups` in YAML, shown when a category is selected):
|
||||
|
||||
**Auto-assigned metadata**: Read-only key-value rows showing:
|
||||
- UUID: "On create" in `--ctp-teal` italic
|
||||
- Revision: "A" (hardcoded initial)
|
||||
- Created By: current user's display name from `useAuth()`
|
||||
| Prefix | Group | Example Fields |
|
||||
|--------|-------|----------------|
|
||||
| F | Fastener Specifications | material, finish, thread_size, head_type, drive_type, ... |
|
||||
| C | Fitting Specifications | material, connection_type, size_1, pressure_rating, ... |
|
||||
| R | Motion Specifications | bearing_type, bore_diameter, load_rating, ... |
|
||||
| ... | ... | (one group per category prefix, defined in YAML) |
|
||||
|
||||
**Attachments**: `FileDropZone` component. Takes `flex: 1` to fill available space.
|
||||
|
||||
**Thumbnail**: A 4:3 aspect ratio placeholder box (`--ctp-crust` bg, `--ctp-surface0` border) with centered text "Generated from CAD file or upload manually". Clicking opens file picker filtered to images. If a thumbnail is uploaded, show it as an `<img>` with `object-fit: cover`.
|
||||
Note: `sourcing_link` and `standard_cost` are revision properties (stored in the `properties` JSONB), not item-level DB columns. They were migrated from item-level fields in PR #1 (migration 013).
|
||||
|
||||
## Backend Changes
|
||||
|
||||
Items 1-3 and 5 below are implemented (migration `011_item_files.sql`, `internal/api/file_handlers.go`). Item 4 (hierarchical categories) remains open.
|
||||
Items 1-5 below are implemented. Item 4 (hierarchical categories) is resolved by the form descriptor's multi-stage category picker.
|
||||
|
||||
### 1. Presigned Upload URL -- IMPLEMENTED
|
||||
|
||||
@@ -597,33 +614,14 @@ Response: 204
|
||||
|
||||
Stores the thumbnail at `items/{item_id}/thumbnail.png` in MinIO. Updates `item.thumbnail_key` column.
|
||||
|
||||
### 4. Hierarchical Categories -- NOT IMPLEMENTED
|
||||
### 4. Hierarchical Categories -- IMPLEMENTED (via Form Descriptor)
|
||||
|
||||
If schemas don't currently support a hierarchical category tree, one of these approaches:
|
||||
Resolved by the schema-driven form descriptor (`GET /api/schemas/{name}/form`). The YAML schema's `ui.category_picker` section defines multi-stage selection:
|
||||
|
||||
**Option A — Schema-driven**: Add a `category_tree` JSON column to the `schemas` table that defines the hierarchy. The `GET /api/schemas` response already returns schemas; the frontend transforms this into the picker tree.
|
||||
- **Stage 1 (domain)**: Groups categories by first character of category code (F=Fasteners, C=Fluid Fittings, etc.). Values defined in `ui.category_picker.stages[0].values`.
|
||||
- **Stage 2 (subcategory)**: Auto-derived by the Go backend's `ValuesByDomain()` method, which groups the category enum values by their first character.
|
||||
|
||||
**Option B — Dedicated table**:
|
||||
|
||||
```sql
|
||||
CREATE TABLE categories (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name TEXT NOT NULL,
|
||||
parent_id UUID REFERENCES categories(id),
|
||||
sort_order INT NOT NULL DEFAULT 0,
|
||||
UNIQUE(parent_id, name)
|
||||
);
|
||||
```
|
||||
|
||||
With endpoints:
|
||||
```
|
||||
GET /api/categories → flat list with parent_id, frontend builds tree
|
||||
POST /api/categories → { name, parent_id? }
|
||||
PUT /api/categories/{id} → { name, sort_order }
|
||||
DELETE /api/categories/{id} → cascade check
|
||||
```
|
||||
|
||||
**Recommendation**: Option B is more flexible and keeps categories as a first-class entity. The three-tier picker doesn't need to be limited to exactly three levels — it can render as many columns as the deepest category path, but three is the practical default (Domain → Group → Subtype).
|
||||
No separate `categories` table is needed — the existing schema enum values are the single source of truth. Adding a new category code to the YAML propagates to the picker automatically.
|
||||
|
||||
### 5. Database Schema Addition -- IMPLEMENTED
|
||||
|
||||
@@ -641,46 +639,89 @@ CREATE TABLE item_files (
|
||||
CREATE INDEX idx_item_files_item ON item_files(item_id);
|
||||
|
||||
ALTER TABLE items ADD COLUMN thumbnail_key TEXT;
|
||||
ALTER TABLE items ADD COLUMN category_id UUID REFERENCES categories(id);
|
||||
ALTER TABLE items ADD COLUMN sourcing_type TEXT NOT NULL DEFAULT 'manufactured';
|
||||
ALTER TABLE items ADD COLUMN sourcing_link TEXT;
|
||||
ALTER TABLE items ADD COLUMN standard_cost NUMERIC(12,2);
|
||||
ALTER TABLE items ADD COLUMN unit_of_measure TEXT NOT NULL DEFAULT 'ea';
|
||||
ALTER TABLE items ADD COLUMN long_description TEXT;
|
||||
```
|
||||
|
||||
## Implementation Order
|
||||
|
||||
1. **TagInput component** — reusable, no backend changes needed, uses existing projects API.
|
||||
2. **CategoryPicker component** — start with flat/mock data, wire to real API after backend adds categories.
|
||||
3. **FileDropZone + useFileUpload** — requires presigned URL backend endpoint first.
|
||||
4. **CreateItemPane rewrite** — compose the above into the two-column layout.
|
||||
5. **Backend: categories table + endpoints** — unblocks real category data.
|
||||
6. **Backend: presigned uploads + item_files** — unblocks file attachments.
|
||||
7. **Backend: items table migration** — adds new columns (sourcing_type, standard_cost, etc.).
|
||||
1. **[DONE] Deduplicate sourcing_link/standard_cost** — Migrated from item-level DB columns to revision properties (migration 013). Removed from Go structs, API types, frontend types.
|
||||
2. **[DONE] Form descriptor API** — Added `ui` section to YAML, Go structs + validation, `GET /api/schemas/{name}/form` endpoint.
|
||||
3. **[DONE] useFormDescriptor hook** — Replaces `useCategories`, fetches and caches form descriptor.
|
||||
4. **[DONE] CategoryPicker rewrite** — Multi-stage domain/subcategory picker driven by form descriptor.
|
||||
5. **[DONE] CreateItemPane rewrite** — Dynamic form rendering from field groups, widget-based field rendering.
|
||||
6. **TagInput component** — reusable, no backend changes needed, uses existing projects API.
|
||||
7. **FileDropZone + useFileUpload** — requires presigned URL backend endpoint (already implemented).
|
||||
|
||||
Steps 1-2 can start immediately. Steps 5-7 can run in parallel once specified. Step 4 ties it all together.
|
||||
## Types Added
|
||||
|
||||
## Types to Add
|
||||
|
||||
Add to `web/src/api/types.ts`:
|
||||
The following types were added to `web/src/api/types.ts` for the form descriptor system:
|
||||
|
||||
```typescript
|
||||
// Categories
|
||||
interface Category {
|
||||
id: string;
|
||||
// Form descriptor types (from GET /api/schemas/{name}/form)
|
||||
interface FormFieldDescriptor {
|
||||
name: string;
|
||||
parent_id: string | null;
|
||||
sort_order: number;
|
||||
type: string;
|
||||
widget: string;
|
||||
label: string;
|
||||
required?: boolean;
|
||||
default?: string;
|
||||
unit?: string;
|
||||
description?: string;
|
||||
options?: string[];
|
||||
currency?: string;
|
||||
derived_from_category?: Record<string, string>;
|
||||
search_endpoint?: string;
|
||||
}
|
||||
|
||||
interface CategoryNode {
|
||||
name: string;
|
||||
id: string;
|
||||
children?: CategoryNode[];
|
||||
interface FormFieldGroup {
|
||||
key: string;
|
||||
label: string;
|
||||
order: number;
|
||||
fields: FormFieldDescriptor[];
|
||||
}
|
||||
|
||||
// File uploads
|
||||
interface CategoryPickerStage {
|
||||
name: string;
|
||||
label: string;
|
||||
values?: Record<string, string>;
|
||||
values_by_domain?: Record<string, Record<string, string>>;
|
||||
}
|
||||
|
||||
interface CategoryPickerDescriptor {
|
||||
style: string;
|
||||
stages: CategoryPickerStage[];
|
||||
}
|
||||
|
||||
interface ItemFieldDef {
|
||||
type: string;
|
||||
widget: string;
|
||||
label: string;
|
||||
required?: boolean;
|
||||
default?: string;
|
||||
options?: string[];
|
||||
derived_from_category?: Record<string, string>;
|
||||
search_endpoint?: string;
|
||||
}
|
||||
|
||||
interface FieldOverride {
|
||||
widget?: string;
|
||||
currency?: string;
|
||||
options?: string[];
|
||||
}
|
||||
|
||||
interface FormDescriptor {
|
||||
schema_name: string;
|
||||
format: string;
|
||||
category_picker: CategoryPickerDescriptor;
|
||||
item_fields: Record<string, ItemFieldDef>;
|
||||
field_groups: FormFieldGroup[];
|
||||
category_field_groups: Record<string, FormFieldGroup[]>;
|
||||
field_overrides: Record<string, FieldOverride>;
|
||||
}
|
||||
|
||||
// File uploads (unchanged)
|
||||
interface PresignRequest {
|
||||
filename: string;
|
||||
content_type: string;
|
||||
@@ -703,20 +744,6 @@ interface ItemFile {
|
||||
created_at: string;
|
||||
}
|
||||
|
||||
// Extended create request
|
||||
interface CreateItemRequest {
|
||||
part_number: string;
|
||||
item_type: 'part' | 'assembly' | 'document';
|
||||
description?: string;
|
||||
category_id?: string;
|
||||
sourcing_type?: 'manufactured' | 'purchased' | 'phantom';
|
||||
standard_cost?: number;
|
||||
unit_of_measure?: string;
|
||||
sourcing_link?: string;
|
||||
long_description?: string;
|
||||
project_ids?: string[];
|
||||
}
|
||||
|
||||
// Pending upload (frontend only, not an API type)
|
||||
interface PendingAttachment {
|
||||
file: File;
|
||||
@@ -726,3 +753,5 @@ interface PendingAttachment {
|
||||
error?: string;
|
||||
}
|
||||
```
|
||||
|
||||
Note: `sourcing_link` and `standard_cost` have been removed from the `Item`, `CreateItemRequest`, and `UpdateItemRequest` interfaces — they are now stored as revision properties and rendered dynamically from the form descriptor.
|
||||
|
||||
106
internal/api/audit_handlers_test.go
Normal file
106
internal/api/audit_handlers_test.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
func newAuditRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Get("/api/audit/completeness", s.HandleAuditCompleteness)
|
||||
r.Get("/api/audit/completeness/{partNumber}", s.HandleAuditItemDetail)
|
||||
return r
|
||||
}
|
||||
|
||||
func TestHandleAuditCompletenessEmpty(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newAuditRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/audit/completeness", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleAuditCompleteness(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newAuditRouter(s)
|
||||
|
||||
createItemDirect(t, s, "AUD-001", "audit item 1", nil)
|
||||
createItemDirect(t, s, "AUD-002", "audit item 2", nil)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/audit/completeness", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
// Should have items array
|
||||
items, ok := resp["items"]
|
||||
if !ok {
|
||||
t.Fatal("response missing 'items' key")
|
||||
}
|
||||
itemList, ok := items.([]any)
|
||||
if !ok {
|
||||
t.Fatal("'items' is not an array")
|
||||
}
|
||||
if len(itemList) < 2 {
|
||||
t.Errorf("expected at least 2 audit items, got %d", len(itemList))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleAuditItemDetail(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newAuditRouter(s)
|
||||
|
||||
cost := 50.0
|
||||
createItemDirect(t, s, "AUDDET-001", "audit detail item", &cost)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/audit/completeness/AUDDET-001", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
if resp["part_number"] != "AUDDET-001" {
|
||||
t.Errorf("part_number: got %v, want %q", resp["part_number"], "AUDDET-001")
|
||||
}
|
||||
if _, ok := resp["score"]; !ok {
|
||||
t.Error("response missing 'score' field")
|
||||
}
|
||||
if _, ok := resp["tier"]; !ok {
|
||||
t.Error("response missing 'tier' field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleAuditItemDetailNotFound(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newAuditRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/audit/completeness/NOPE-999", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
211
internal/api/auth_handlers_test.go
Normal file
211
internal/api/auth_handlers_test.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// newAuthTestServer creates a Server with a real auth service (for token tests).
|
||||
func newAuthTestServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
pool := testutil.MustConnectTestPool(t)
|
||||
database := db.NewFromPool(pool)
|
||||
users := db.NewUserRepository(database)
|
||||
tokens := db.NewTokenRepository(database)
|
||||
authSvc := auth.NewService(zerolog.Nop(), users, tokens)
|
||||
broker := NewBroker(zerolog.Nop())
|
||||
state := NewServerState(zerolog.Nop(), nil, broker)
|
||||
return NewServer(
|
||||
zerolog.Nop(),
|
||||
database,
|
||||
map[string]*schema.Schema{},
|
||||
"", // schemasDir
|
||||
nil, // storage
|
||||
authSvc, // authService
|
||||
nil, // sessionManager
|
||||
nil, // oidcBackend
|
||||
nil, // authConfig
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
modules.NewRegistry(), // modules
|
||||
nil, // cfg
|
||||
)
|
||||
}
|
||||
|
||||
// ensureTestUser creates a user in the DB and returns their ID.
|
||||
func ensureTestUser(t *testing.T, s *Server, username string) string {
|
||||
t.Helper()
|
||||
u := &db.User{
|
||||
Username: username,
|
||||
DisplayName: "Test " + username,
|
||||
Email: username + "@test.local",
|
||||
AuthSource: "local",
|
||||
Role: "admin",
|
||||
}
|
||||
users := db.NewUserRepository(s.db)
|
||||
if err := users.Upsert(context.Background(), u); err != nil {
|
||||
t.Fatalf("upserting user: %v", err)
|
||||
}
|
||||
return u.ID
|
||||
}
|
||||
|
||||
func newAuthRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Get("/api/auth/me", s.HandleGetCurrentUser)
|
||||
r.Post("/api/auth/tokens", s.HandleCreateToken)
|
||||
r.Get("/api/auth/tokens", s.HandleListTokens)
|
||||
r.Delete("/api/auth/tokens/{id}", s.HandleRevokeToken)
|
||||
r.Get("/api/auth/config", s.HandleAuthConfig)
|
||||
return r
|
||||
}
|
||||
|
||||
func TestHandleGetCurrentUser(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newAuthRouter(s)
|
||||
|
||||
req := authRequest(httptest.NewRequest("GET", "/api/auth/me", nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
if resp["username"] != "testadmin" {
|
||||
t.Errorf("username: got %v, want %q", resp["username"], "testadmin")
|
||||
}
|
||||
if resp["role"] != "admin" {
|
||||
t.Errorf("role: got %v, want %q", resp["role"], "admin")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleGetCurrentUserUnauth(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newAuthRouter(s)
|
||||
|
||||
// No auth context
|
||||
req := httptest.NewRequest("GET", "/api/auth/me", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusUnauthorized {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleAuthConfig(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newAuthRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/auth/config", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
// With nil oidc and nil authConfig, both should be false
|
||||
if resp["oidc_enabled"] != false {
|
||||
t.Errorf("oidc_enabled: got %v, want false", resp["oidc_enabled"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCreateAndListTokens(t *testing.T) {
|
||||
s := newAuthTestServer(t)
|
||||
router := newAuthRouter(s)
|
||||
|
||||
// Create a user in the DB so token generation can associate
|
||||
userID := ensureTestUser(t, s, "tokenuser")
|
||||
|
||||
// Inject user with the DB-assigned ID
|
||||
u := &auth.User{
|
||||
ID: userID,
|
||||
Username: "tokenuser",
|
||||
DisplayName: "Test tokenuser",
|
||||
Role: auth.RoleAdmin,
|
||||
AuthSource: "local",
|
||||
}
|
||||
|
||||
// Create token
|
||||
body := `{"name":"test-token"}`
|
||||
req := httptest.NewRequest("POST", "/api/auth/tokens", strings.NewReader(body))
|
||||
req = req.WithContext(auth.ContextWithUser(req.Context(), u))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf("create token status: got %d, want %d; body: %s", w.Code, http.StatusCreated, w.Body.String())
|
||||
}
|
||||
|
||||
var createResp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &createResp); err != nil {
|
||||
t.Fatalf("decoding create response: %v", err)
|
||||
}
|
||||
if createResp["token"] == nil || createResp["token"] == "" {
|
||||
t.Error("expected token in response")
|
||||
}
|
||||
tokenID, _ := createResp["id"].(string)
|
||||
|
||||
// List tokens
|
||||
req = httptest.NewRequest("GET", "/api/auth/tokens", nil)
|
||||
req = req.WithContext(auth.ContextWithUser(req.Context(), u))
|
||||
w = httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("list tokens status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var tokens []map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &tokens); err != nil {
|
||||
t.Fatalf("decoding list response: %v", err)
|
||||
}
|
||||
if len(tokens) != 1 {
|
||||
t.Errorf("expected 1 token, got %d", len(tokens))
|
||||
}
|
||||
|
||||
// Revoke token
|
||||
req = httptest.NewRequest("DELETE", "/api/auth/tokens/"+tokenID, nil)
|
||||
req = req.WithContext(auth.ContextWithUser(req.Context(), u))
|
||||
w = httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNoContent {
|
||||
t.Errorf("revoke token status: got %d, want %d; body: %s", w.Code, http.StatusNoContent, w.Body.String())
|
||||
}
|
||||
|
||||
// List again — should be empty
|
||||
req = httptest.NewRequest("GET", "/api/auth/tokens", nil)
|
||||
req = req.WithContext(auth.ContextWithUser(req.Context(), u))
|
||||
w = httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
json.Unmarshal(w.Body.Bytes(), &tokens)
|
||||
if len(tokens) != 0 {
|
||||
t.Errorf("expected 0 tokens after revoke, got %d", len(tokens))
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -1219,6 +1220,9 @@ func (s *Server) HandleMergeBOM(w http.ResponseWriter, r *http.Request) {
|
||||
"unreferenced": len(diff.Removed),
|
||||
}))
|
||||
|
||||
// Trigger auto-jobs (e.g. assembly validation)
|
||||
go s.triggerJobs(context.Background(), "bom_changed", parent.ID, parent)
|
||||
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
@@ -35,6 +36,10 @@ func newTestServer(t *testing.T) *Server {
|
||||
nil, // authConfig (nil = dev mode)
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
modules.NewRegistry(), // modules
|
||||
nil, // cfg
|
||||
)
|
||||
}
|
||||
|
||||
@@ -55,12 +60,15 @@ func newTestRouter(s *Server) http.Handler {
|
||||
func createItemDirect(t *testing.T, s *Server, pn, desc string, cost *float64) {
|
||||
t.Helper()
|
||||
item := &db.Item{
|
||||
PartNumber: pn,
|
||||
ItemType: "part",
|
||||
Description: desc,
|
||||
StandardCost: cost,
|
||||
PartNumber: pn,
|
||||
ItemType: "part",
|
||||
Description: desc,
|
||||
}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
var props map[string]any
|
||||
if cost != nil {
|
||||
props = map[string]any{"standard_cost": *cost}
|
||||
}
|
||||
if err := s.items.Create(context.Background(), item, props); err != nil {
|
||||
t.Fatalf("creating item %s: %v", pn, err)
|
||||
}
|
||||
}
|
||||
|
||||
259
internal/api/csv_handlers_test.go
Normal file
259
internal/api/csv_handlers_test.go
Normal file
@@ -0,0 +1,259 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// findSchemasDir walks upward to find the project root and returns
|
||||
// the path to the schemas/ directory.
|
||||
func findSchemasDir(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getting working directory: %v", err)
|
||||
}
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
|
||||
return filepath.Join(dir, "schemas")
|
||||
}
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
t.Fatal("could not find project root")
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// newTestServerWithSchemas creates a Server backed by a real test DB with schemas loaded.
|
||||
func newTestServerWithSchemas(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
pool := testutil.MustConnectTestPool(t)
|
||||
database := db.NewFromPool(pool)
|
||||
broker := NewBroker(zerolog.Nop())
|
||||
state := NewServerState(zerolog.Nop(), nil, broker)
|
||||
schemasDir := findSchemasDir(t)
|
||||
schemas, err := schema.LoadAll(schemasDir)
|
||||
if err != nil {
|
||||
t.Fatalf("loading schemas: %v", err)
|
||||
}
|
||||
return NewServer(
|
||||
zerolog.Nop(),
|
||||
database,
|
||||
schemas,
|
||||
schemasDir,
|
||||
nil, // storage
|
||||
nil, // authService
|
||||
nil, // sessionManager
|
||||
nil, // oidcBackend
|
||||
nil, // authConfig
|
||||
broker,
|
||||
state,
|
||||
nil, // jobDefs
|
||||
"", // jobDefsDir
|
||||
modules.NewRegistry(), // modules
|
||||
nil, // cfg
|
||||
)
|
||||
}
|
||||
|
||||
func newCSVRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Get("/api/items/export.csv", s.HandleExportCSV)
|
||||
r.Get("/api/items/template.csv", s.HandleCSVTemplate)
|
||||
r.Post("/api/items/import", s.HandleImportCSV)
|
||||
r.Get("/api/items/{partNumber}/bom/export.csv", s.HandleExportBOMCSV)
|
||||
return r
|
||||
}
|
||||
|
||||
func TestHandleExportCSVEmpty(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/export.csv", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
ct := w.Header().Get("Content-Type")
|
||||
if !strings.Contains(ct, "text/csv") {
|
||||
t.Errorf("content-type: got %q, want text/csv", ct)
|
||||
}
|
||||
|
||||
// Should have header row only
|
||||
lines := strings.Split(strings.TrimSpace(w.Body.String()), "\n")
|
||||
if len(lines) != 1 {
|
||||
t.Errorf("expected 1 line (header only), got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleExportCSVWithItems(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
createItemDirect(t, s, "CSV-001", "first csv item", nil)
|
||||
createItemDirect(t, s, "CSV-002", "second csv item", nil)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/export.csv", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(w.Body.String()), "\n")
|
||||
// header + 2 data rows
|
||||
if len(lines) != 3 {
|
||||
t.Errorf("expected 3 lines (header + 2 rows), got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCSVTemplate(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/template.csv?schema=kindred-rd", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
ct := w.Header().Get("Content-Type")
|
||||
if !strings.Contains(ct, "text/csv") {
|
||||
t.Errorf("content-type: got %q, want text/csv", ct)
|
||||
}
|
||||
|
||||
// Should contain at least "category" and "description" columns
|
||||
header := strings.Split(strings.TrimSpace(w.Body.String()), "\n")[0]
|
||||
if !strings.Contains(header, "category") {
|
||||
t.Error("template header missing 'category' column")
|
||||
}
|
||||
if !strings.Contains(header, "description") {
|
||||
t.Error("template header missing 'description' column")
|
||||
}
|
||||
}
|
||||
|
||||
// csvMultipartBody creates a multipart form body with a CSV file and optional form fields.
|
||||
func csvMultipartBody(t *testing.T, csvContent string, fields map[string]string) (*bytes.Buffer, string) {
|
||||
t.Helper()
|
||||
body := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(body)
|
||||
|
||||
part, err := writer.CreateFormFile("file", "import.csv")
|
||||
if err != nil {
|
||||
t.Fatalf("creating form file: %v", err)
|
||||
}
|
||||
io.WriteString(part, csvContent)
|
||||
|
||||
for k, v := range fields {
|
||||
writer.WriteField(k, v)
|
||||
}
|
||||
writer.Close()
|
||||
return body, writer.FormDataContentType()
|
||||
}
|
||||
|
||||
func TestHandleImportCSVDryRun(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
csv := "category,description\nF01,Dry run widget\nF01,Dry run gadget\n"
|
||||
body, contentType := csvMultipartBody(t, csv, map[string]string{"dry_run": "true"})
|
||||
|
||||
req := authRequest(httptest.NewRequest("POST", "/api/items/import", body))
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var result CSVImportResult
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &result); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
if result.TotalRows != 2 {
|
||||
t.Errorf("total_rows: got %d, want 2", result.TotalRows)
|
||||
}
|
||||
// Dry run should not create items
|
||||
if len(result.CreatedItems) != 0 {
|
||||
t.Errorf("dry run should not create items, got %d", len(result.CreatedItems))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleImportCSVCommit(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
csv := "category,description\nF01,Committed widget\n"
|
||||
body, contentType := csvMultipartBody(t, csv, nil)
|
||||
|
||||
req := authRequest(httptest.NewRequest("POST", "/api/items/import", body))
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var result CSVImportResult
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &result); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
if result.SuccessCount != 1 {
|
||||
t.Errorf("success_count: got %d, want 1", result.SuccessCount)
|
||||
}
|
||||
if len(result.CreatedItems) != 1 {
|
||||
t.Errorf("created_items: got %d, want 1", len(result.CreatedItems))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleExportBOMCSV(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newCSVRouter(s)
|
||||
|
||||
createItemDirect(t, s, "BOMCSV-P", "parent", nil)
|
||||
createItemDirect(t, s, "BOMCSV-C", "child", nil)
|
||||
addBOMDirect(t, s, "BOMCSV-P", "BOMCSV-C", 3)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/BOMCSV-P/bom/export.csv", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
ct := w.Header().Get("Content-Type")
|
||||
if !strings.Contains(ct, "text/csv") {
|
||||
t.Errorf("content-type: got %q, want text/csv", ct)
|
||||
}
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(w.Body.String()), "\n")
|
||||
// header + 1 BOM entry
|
||||
if len(lines) != 2 {
|
||||
t.Errorf("expected 2 lines (header + 1 row), got %d", len(lines))
|
||||
}
|
||||
}
|
||||
271
internal/api/dag_handlers.go
Normal file
271
internal/api/dag_handlers.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// dagSyncRequest is the payload for PUT /api/items/{partNumber}/dag.
|
||||
type dagSyncRequest struct {
|
||||
RevisionNumber int `json:"revision_number"`
|
||||
Nodes []dagSyncNode `json:"nodes"`
|
||||
Edges []dagSyncEdge `json:"edges"`
|
||||
}
|
||||
|
||||
type dagSyncNode struct {
|
||||
NodeKey string `json:"node_key"`
|
||||
NodeType string `json:"node_type"`
|
||||
PropertiesHash *string `json:"properties_hash,omitempty"`
|
||||
ValidationState string `json:"validation_state,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
type dagSyncEdge struct {
|
||||
SourceKey string `json:"source_key"`
|
||||
TargetKey string `json:"target_key"`
|
||||
EdgeType string `json:"edge_type,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
// HandleGetDAG returns the feature DAG for an item's current revision.
|
||||
func (s *Server) HandleGetDAG(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
nodes, err := s.dag.GetNodes(ctx, item.ID, item.CurrentRevision)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG nodes")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get DAG")
|
||||
return
|
||||
}
|
||||
|
||||
edges, err := s.dag.GetEdges(ctx, item.ID, item.CurrentRevision)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG edges")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get DAG edges")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"item_id": item.ID,
|
||||
"part_number": item.PartNumber,
|
||||
"revision_number": item.CurrentRevision,
|
||||
"nodes": nodes,
|
||||
"edges": edges,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleGetForwardCone returns all downstream dependents of a node.
|
||||
func (s *Server) HandleGetForwardCone(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
nodeKey := chi.URLParam(r, "nodeKey")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
node, err := s.dag.GetNodeByKey(ctx, item.ID, item.CurrentRevision, nodeKey)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG node")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get node")
|
||||
return
|
||||
}
|
||||
if node == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Node not found")
|
||||
return
|
||||
}
|
||||
|
||||
cone, err := s.dag.GetForwardCone(ctx, node.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get forward cone")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get forward cone")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"root_node": node,
|
||||
"cone": cone,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleGetDirtySubgraph returns all non-clean nodes for an item.
|
||||
func (s *Server) HandleGetDirtySubgraph(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
nodes, err := s.dag.GetDirtySubgraph(ctx, item.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get dirty subgraph")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get dirty subgraph")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"item_id": item.ID,
|
||||
"nodes": nodes,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleSyncDAG accepts a full feature tree from a client or runner.
|
||||
func (s *Server) HandleSyncDAG(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
var req dagSyncRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.RevisionNumber == 0 {
|
||||
req.RevisionNumber = item.CurrentRevision
|
||||
}
|
||||
|
||||
// Convert request nodes to DB nodes
|
||||
nodes := make([]db.DAGNode, len(req.Nodes))
|
||||
for i, n := range req.Nodes {
|
||||
state := n.ValidationState
|
||||
if state == "" {
|
||||
state = "clean"
|
||||
}
|
||||
nodes[i] = db.DAGNode{
|
||||
NodeKey: n.NodeKey,
|
||||
NodeType: n.NodeType,
|
||||
PropertiesHash: n.PropertiesHash,
|
||||
ValidationState: state,
|
||||
Metadata: n.Metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Sync nodes first to get IDs
|
||||
if err := s.dag.SyncFeatureTree(ctx, item.ID, req.RevisionNumber, nodes, nil); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to sync DAG nodes")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG")
|
||||
return
|
||||
}
|
||||
|
||||
// Build key→ID map from synced nodes
|
||||
keyToID := make(map[string]string, len(nodes))
|
||||
for _, n := range nodes {
|
||||
keyToID[n.NodeKey] = n.ID
|
||||
}
|
||||
|
||||
// Convert request edges, resolving keys to IDs
|
||||
edges := make([]db.DAGEdge, len(req.Edges))
|
||||
for i, e := range req.Edges {
|
||||
sourceID, ok := keyToID[e.SourceKey]
|
||||
if !ok {
|
||||
writeError(w, http.StatusBadRequest, "invalid_edge",
|
||||
"Unknown source_key: "+e.SourceKey)
|
||||
return
|
||||
}
|
||||
targetID, ok := keyToID[e.TargetKey]
|
||||
if !ok {
|
||||
writeError(w, http.StatusBadRequest, "invalid_edge",
|
||||
"Unknown target_key: "+e.TargetKey)
|
||||
return
|
||||
}
|
||||
edgeType := e.EdgeType
|
||||
if edgeType == "" {
|
||||
edgeType = "depends_on"
|
||||
}
|
||||
edges[i] = db.DAGEdge{
|
||||
SourceNodeID: sourceID,
|
||||
TargetNodeID: targetID,
|
||||
EdgeType: edgeType,
|
||||
Metadata: e.Metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Sync edges (nodes already synced, so pass empty nodes to skip re-upsert)
|
||||
if len(edges) > 0 {
|
||||
// Delete old edges and insert new ones
|
||||
if err := s.dag.DeleteEdgesForItem(ctx, item.ID, req.RevisionNumber); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to delete old edges")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG edges")
|
||||
return
|
||||
}
|
||||
for i := range edges {
|
||||
if err := s.dag.CreateEdge(ctx, &edges[i]); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to create edge")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to create edge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Publish SSE event
|
||||
s.broker.Publish("dag.updated", mustMarshal(map[string]any{
|
||||
"item_id": item.ID,
|
||||
"part_number": item.PartNumber,
|
||||
"revision_number": req.RevisionNumber,
|
||||
"node_count": len(req.Nodes),
|
||||
"edge_count": len(req.Edges),
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"synced": true,
|
||||
"node_count": len(req.Nodes),
|
||||
"edge_count": len(req.Edges),
|
||||
})
|
||||
}
|
||||
|
||||
// HandleMarkDirty marks a node and all its downstream dependents as dirty.
|
||||
func (s *Server) HandleMarkDirty(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
partNumber := chi.URLParam(r, "partNumber")
|
||||
nodeKey := chi.URLParam(r, "nodeKey")
|
||||
|
||||
item, err := s.items.GetByPartNumber(ctx, partNumber)
|
||||
if err != nil || item == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Item not found")
|
||||
return
|
||||
}
|
||||
|
||||
node, err := s.dag.GetNodeByKey(ctx, item.ID, item.CurrentRevision, nodeKey)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get DAG node")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get node")
|
||||
return
|
||||
}
|
||||
if node == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Node not found")
|
||||
return
|
||||
}
|
||||
|
||||
affected, err := s.dag.MarkDirty(ctx, node.ID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to mark dirty")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to mark dirty")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"node_key": nodeKey,
|
||||
"nodes_affected": affected,
|
||||
})
|
||||
}
|
||||
249
internal/api/dag_handlers_test.go
Normal file
249
internal/api/dag_handlers_test.go
Normal file
@@ -0,0 +1,249 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
func newDAGTestServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
pool := testutil.MustConnectTestPool(t)
|
||||
database := db.NewFromPool(pool)
|
||||
broker := NewBroker(zerolog.Nop())
|
||||
state := NewServerState(zerolog.Nop(), nil, broker)
|
||||
return NewServer(
|
||||
zerolog.Nop(),
|
||||
database,
|
||||
map[string]*schema.Schema{},
|
||||
"",
|
||||
nil, nil, nil, nil, nil,
|
||||
broker, state,
|
||||
nil, "",
|
||||
modules.NewRegistry(), nil,
|
||||
)
|
||||
}
|
||||
|
||||
func newDAGRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Route("/api/items/{partNumber}", func(r chi.Router) {
|
||||
r.Get("/dag", s.HandleGetDAG)
|
||||
r.Get("/dag/forward-cone/{nodeKey}", s.HandleGetForwardCone)
|
||||
r.Get("/dag/dirty", s.HandleGetDirtySubgraph)
|
||||
r.Put("/dag", s.HandleSyncDAG)
|
||||
r.Post("/dag/mark-dirty/{nodeKey}", s.HandleMarkDirty)
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
func TestHandleGetDAG_Empty(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
// Create an item
|
||||
item := &db.Item{PartNumber: "DAG-TEST-001", ItemType: "part", Description: "DAG test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/DAG-TEST-001/dag", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["part_number"] != "DAG-TEST-001" {
|
||||
t.Errorf("expected part_number DAG-TEST-001, got %v", resp["part_number"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSyncDAG(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
// Create an item with a revision
|
||||
item := &db.Item{PartNumber: "DAG-SYNC-001", ItemType: "part", Description: "sync test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync a feature tree
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "Sketch001", "node_type": "sketch"},
|
||||
{"node_key": "Pad001", "node_type": "pad"},
|
||||
{"node_key": "Fillet001", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "Sketch001", "target_key": "Pad001", "edge_type": "depends_on"},
|
||||
{"source_key": "Pad001", "target_key": "Fillet001", "edge_type": "depends_on"}
|
||||
]
|
||||
}`
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-SYNC-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["node_count"] != float64(3) {
|
||||
t.Errorf("expected 3 nodes, got %v", resp["node_count"])
|
||||
}
|
||||
if resp["edge_count"] != float64(2) {
|
||||
t.Errorf("expected 2 edges, got %v", resp["edge_count"])
|
||||
}
|
||||
|
||||
// Verify we can read the DAG back
|
||||
req2 := httptest.NewRequest("GET", "/api/items/DAG-SYNC-001/dag", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("GET dag: expected 200, got %d", w2.Code)
|
||||
}
|
||||
var dagResp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &dagResp)
|
||||
nodes, ok := dagResp["nodes"].([]any)
|
||||
if !ok || len(nodes) != 3 {
|
||||
t.Errorf("expected 3 nodes in GET, got %v", dagResp["nodes"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleForwardCone(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
item := &db.Item{PartNumber: "DAG-CONE-001", ItemType: "part", Description: "cone test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync a linear chain: A -> B -> C
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "A", "node_type": "sketch"},
|
||||
{"node_key": "B", "node_type": "pad"},
|
||||
{"node_key": "C", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "A", "target_key": "B"},
|
||||
{"source_key": "B", "target_key": "C"}
|
||||
]
|
||||
}`
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-CONE-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("sync: %d %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Forward cone from A should include B and C
|
||||
req2 := httptest.NewRequest("GET", "/api/items/DAG-CONE-001/dag/forward-cone/A", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("forward-cone: %d %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &resp)
|
||||
cone, ok := resp["cone"].([]any)
|
||||
if !ok || len(cone) != 2 {
|
||||
t.Errorf("expected 2 nodes in forward cone, got %v", resp["cone"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleMarkDirty(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
item := &db.Item{PartNumber: "DAG-DIRTY-001", ItemType: "part", Description: "dirty test"}
|
||||
if err := s.items.Create(context.Background(), item, nil); err != nil {
|
||||
t.Fatalf("creating item: %v", err)
|
||||
}
|
||||
|
||||
// Sync: A -> B -> C
|
||||
body := `{
|
||||
"nodes": [
|
||||
{"node_key": "X", "node_type": "sketch"},
|
||||
{"node_key": "Y", "node_type": "pad"},
|
||||
{"node_key": "Z", "node_type": "fillet"}
|
||||
],
|
||||
"edges": [
|
||||
{"source_key": "X", "target_key": "Y"},
|
||||
{"source_key": "Y", "target_key": "Z"}
|
||||
]
|
||||
}`
|
||||
req := httptest.NewRequest("PUT", "/api/items/DAG-DIRTY-001/dag", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("sync: %d %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Mark X dirty — should propagate to Y and Z
|
||||
req2 := httptest.NewRequest("POST", "/api/items/DAG-DIRTY-001/dag/mark-dirty/X", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("mark-dirty: %d %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &resp)
|
||||
affected := resp["nodes_affected"].(float64)
|
||||
if affected != 3 {
|
||||
t.Errorf("expected 3 nodes affected, got %v", affected)
|
||||
}
|
||||
|
||||
// Verify dirty subgraph
|
||||
req3 := httptest.NewRequest("GET", "/api/items/DAG-DIRTY-001/dag/dirty", nil)
|
||||
w3 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w3, req3)
|
||||
|
||||
if w3.Code != http.StatusOK {
|
||||
t.Fatalf("dirty: %d %s", w3.Code, w3.Body.String())
|
||||
}
|
||||
var dirtyResp map[string]any
|
||||
json.Unmarshal(w3.Body.Bytes(), &dirtyResp)
|
||||
dirtyNodes, ok := dirtyResp["nodes"].([]any)
|
||||
if !ok || len(dirtyNodes) != 3 {
|
||||
t.Errorf("expected 3 dirty nodes, got %v", dirtyResp["nodes"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleGetDAG_NotFound(t *testing.T) {
|
||||
s := newDAGTestServer(t)
|
||||
r := newDAGRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/NONEXISTENT-999/dag", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("expected 404, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
186
internal/api/file_handlers_test.go
Normal file
186
internal/api/file_handlers_test.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// newFileRouter creates a chi router with file-related routes for testing.
|
||||
func newFileRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Route("/api/items/{partNumber}", func(r chi.Router) {
|
||||
r.Get("/files", s.HandleListItemFiles)
|
||||
r.Post("/files", s.HandleAssociateItemFile)
|
||||
r.Delete("/files/{fileId}", s.HandleDeleteItemFile)
|
||||
r.Put("/thumbnail", s.HandleSetItemThumbnail)
|
||||
r.Post("/file", s.HandleUploadFile)
|
||||
r.Get("/file/{revision}", s.HandleDownloadFile)
|
||||
})
|
||||
r.Post("/api/uploads/presign", s.HandlePresignUpload)
|
||||
return r
|
||||
}
|
||||
|
||||
// createFileDirect creates a file record directly via the DB for test setup.
|
||||
func createFileDirect(t *testing.T, s *Server, itemID, filename string) *db.ItemFile {
|
||||
t.Helper()
|
||||
f := &db.ItemFile{
|
||||
ItemID: itemID,
|
||||
Filename: filename,
|
||||
ContentType: "application/octet-stream",
|
||||
Size: 1024,
|
||||
ObjectKey: "items/" + itemID + "/files/" + filename,
|
||||
}
|
||||
if err := s.itemFiles.Create(context.Background(), f); err != nil {
|
||||
t.Fatalf("creating file %s: %v", filename, err)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func TestHandleListItemFiles(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newFileRouter(s)
|
||||
|
||||
createItemDirect(t, s, "FAPI-001", "file list item", nil)
|
||||
item, _ := s.items.GetByPartNumber(context.Background(), "FAPI-001")
|
||||
|
||||
createFileDirect(t, s, item.ID, "drawing.pdf")
|
||||
createFileDirect(t, s, item.ID, "model.step")
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/FAPI-001/files", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var files []itemFileResponse
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &files); err != nil {
|
||||
t.Fatalf("decoding response: %v", err)
|
||||
}
|
||||
if len(files) != 2 {
|
||||
t.Errorf("expected 2 files, got %d", len(files))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListItemFilesNotFound(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newFileRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/items/NONEXISTENT/files", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleDeleteItemFile(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newFileRouter(s)
|
||||
|
||||
createItemDirect(t, s, "FDEL-API-001", "delete file item", nil)
|
||||
item, _ := s.items.GetByPartNumber(context.Background(), "FDEL-API-001")
|
||||
f := createFileDirect(t, s, item.ID, "removable.bin")
|
||||
|
||||
req := authRequest(httptest.NewRequest("DELETE", "/api/items/FDEL-API-001/files/"+f.ID, nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNoContent {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusNoContent, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleDeleteItemFileCrossItem(t *testing.T) {
|
||||
s := newTestServer(t)
|
||||
router := newFileRouter(s)
|
||||
|
||||
// Create two items, attach file to item A
|
||||
createItemDirect(t, s, "CROSS-A", "item A", nil)
|
||||
createItemDirect(t, s, "CROSS-B", "item B", nil)
|
||||
itemA, _ := s.items.GetByPartNumber(context.Background(), "CROSS-A")
|
||||
f := createFileDirect(t, s, itemA.ID, "belongs-to-a.pdf")
|
||||
|
||||
// Try to delete via item B — should fail
|
||||
req := authRequest(httptest.NewRequest("DELETE", "/api/items/CROSS-B/files/"+f.ID, nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandlePresignUploadNoStorage(t *testing.T) {
|
||||
s := newTestServer(t) // storage is nil
|
||||
router := newFileRouter(s)
|
||||
|
||||
body := `{"filename":"test.bin","content_type":"application/octet-stream","size":1024}`
|
||||
req := authRequest(httptest.NewRequest("POST", "/api/uploads/presign", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusServiceUnavailable, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleUploadFileNoStorage(t *testing.T) {
|
||||
s := newTestServer(t) // storage is nil
|
||||
router := newFileRouter(s)
|
||||
|
||||
createItemDirect(t, s, "UPNS-001", "upload no storage", nil)
|
||||
|
||||
req := authRequest(httptest.NewRequest("POST", "/api/items/UPNS-001/file", strings.NewReader("fake")))
|
||||
req.Header.Set("Content-Type", "multipart/form-data")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusServiceUnavailable, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleAssociateFileNoStorage(t *testing.T) {
|
||||
s := newTestServer(t) // storage is nil
|
||||
router := newFileRouter(s)
|
||||
|
||||
createItemDirect(t, s, "ASSNS-001", "associate no storage", nil)
|
||||
|
||||
body := `{"object_key":"uploads/tmp/abc/test.bin","filename":"test.bin"}`
|
||||
req := authRequest(httptest.NewRequest("POST", "/api/items/ASSNS-001/files", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusServiceUnavailable, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSetThumbnailNoStorage(t *testing.T) {
|
||||
s := newTestServer(t) // storage is nil
|
||||
router := newFileRouter(s)
|
||||
|
||||
createItemDirect(t, s, "THNS-001", "thumbnail no storage", nil)
|
||||
|
||||
body := `{"object_key":"uploads/tmp/abc/thumb.png"}`
|
||||
req := authRequest(httptest.NewRequest("PUT", "/api/items/THNS-001/thumbnail", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusServiceUnavailable, w.Body.String())
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/partnum"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/storage"
|
||||
@@ -43,6 +45,13 @@ type Server struct {
|
||||
itemFiles *db.ItemFileRepository
|
||||
broker *Broker
|
||||
serverState *ServerState
|
||||
dag *db.DAGRepository
|
||||
jobs *db.JobRepository
|
||||
jobDefs map[string]*jobdef.Definition
|
||||
jobDefsDir string
|
||||
modules *modules.Registry
|
||||
cfg *config.Config
|
||||
settings *db.SettingsRepository
|
||||
}
|
||||
|
||||
// NewServer creates a new API server.
|
||||
@@ -58,11 +67,18 @@ func NewServer(
|
||||
authCfg *config.AuthConfig,
|
||||
broker *Broker,
|
||||
state *ServerState,
|
||||
jobDefs map[string]*jobdef.Definition,
|
||||
jobDefsDir string,
|
||||
registry *modules.Registry,
|
||||
cfg *config.Config,
|
||||
) *Server {
|
||||
items := db.NewItemRepository(database)
|
||||
projects := db.NewProjectRepository(database)
|
||||
relationships := db.NewRelationshipRepository(database)
|
||||
itemFiles := db.NewItemFileRepository(database)
|
||||
dag := db.NewDAGRepository(database)
|
||||
jobs := db.NewJobRepository(database)
|
||||
settings := db.NewSettingsRepository(database)
|
||||
seqStore := &dbSequenceStore{db: database, schemas: schemas}
|
||||
partgen := partnum.NewGenerator(schemas, seqStore)
|
||||
|
||||
@@ -83,6 +99,13 @@ func NewServer(
|
||||
itemFiles: itemFiles,
|
||||
broker: broker,
|
||||
serverState: state,
|
||||
dag: dag,
|
||||
jobs: jobs,
|
||||
jobDefs: jobDefs,
|
||||
jobDefsDir: jobDefsDir,
|
||||
modules: registry,
|
||||
cfg: cfg,
|
||||
settings: settings,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,6 +176,54 @@ func (s *Server) HandleReady(w http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
}
|
||||
|
||||
// HandleGetModules returns the public module discovery response.
// No authentication required — clients call this pre-login, so only
// non-secret configuration may be exposed here.
func (s *Server) HandleGetModules(w http.ResponseWriter, r *http.Request) {
	mods := make(map[string]any, 10)
	for _, m := range s.modules.All() {
		entry := map[string]any{
			"enabled":  s.modules.IsEnabled(m.ID),
			"required": m.Required,
			"name":     m.Name,
		}
		// Optional fields are omitted rather than sent empty.
		if m.Version != "" {
			entry["version"] = m.Version
		}
		if len(m.DependsOn) > 0 {
			entry["depends_on"] = m.DependsOn
		}

		// Public config (non-secret) for specific modules.
		// s.cfg may be nil (e.g. in tests), hence the guards.
		switch m.ID {
		case "auth":
			if s.cfg != nil {
				entry["config"] = map[string]any{
					"local_enabled":   s.cfg.Auth.Local.Enabled,
					"ldap_enabled":    s.cfg.Auth.LDAP.Enabled,
					"oidc_enabled":    s.cfg.Auth.OIDC.Enabled,
					"oidc_issuer_url": s.cfg.Auth.OIDC.IssuerURL,
				}
			}
		case "freecad":
			if s.cfg != nil {
				entry["config"] = map[string]any{
					"uri_scheme": s.cfg.FreeCAD.URIScheme,
				}
			}
		}

		mods[m.ID] = entry
	}

	writeJSON(w, http.StatusOK, map[string]any{
		"modules": mods,
		"server": map[string]any{
			// NOTE(review): version is hard-coded here — consider sourcing
			// it from a build-time variable to avoid drift.
			"version":   "0.2",
			"read_only": s.serverState.IsReadOnly(),
		},
	})
}
|
||||
|
||||
// Schema handlers
|
||||
|
||||
// SchemaResponse represents a schema in API responses.
|
||||
@@ -621,6 +692,12 @@ func (s *Server) HandleCreateItem(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.partgen.Validate(partNumber, schemaName); err != nil {
|
||||
s.logger.Error().Err(err).Str("part_number", partNumber).Msg("generated part number failed validation")
|
||||
writeError(w, http.StatusInternalServerError, "validation_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
item = &db.Item{
|
||||
PartNumber: partNumber,
|
||||
ItemType: itemType,
|
||||
@@ -1470,6 +1547,9 @@ func (s *Server) HandleCreateRevision(w http.ResponseWriter, r *http.Request) {
|
||||
"part_number": partNumber,
|
||||
"revision_number": rev.RevisionNumber,
|
||||
}))
|
||||
|
||||
// Trigger auto-jobs (e.g. validation, export)
|
||||
go s.triggerJobs(context.Background(), "revision_created", item.ID, item)
|
||||
}
|
||||
|
||||
// HandleUploadFile uploads a file and creates a new revision.
|
||||
|
||||
378
internal/api/job_handlers.go
Normal file
378
internal/api/job_handlers.go
Normal file
@@ -0,0 +1,378 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// HandleListJobs returns jobs filtered by status and/or item.
|
||||
func (s *Server) HandleListJobs(w http.ResponseWriter, r *http.Request) {
|
||||
status := r.URL.Query().Get("status")
|
||||
itemID := r.URL.Query().Get("item_id")
|
||||
|
||||
limit := 50
|
||||
if v := r.URL.Query().Get("limit"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n > 0 && n <= 200 {
|
||||
limit = n
|
||||
}
|
||||
}
|
||||
offset := 0
|
||||
if v := r.URL.Query().Get("offset"); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil && n >= 0 {
|
||||
offset = n
|
||||
}
|
||||
}
|
||||
|
||||
jobs, err := s.jobs.ListJobs(r.Context(), status, itemID, limit, offset)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list jobs")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list jobs")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, jobs)
|
||||
}
|
||||
|
||||
// HandleGetJob returns a single job by ID.
|
||||
func (s *Server) HandleGetJob(w http.ResponseWriter, r *http.Request) {
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
|
||||
job, err := s.jobs.GetJob(r.Context(), jobID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get job")
|
||||
return
|
||||
}
|
||||
if job == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job not found")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, job)
|
||||
}
|
||||
|
||||
// HandleGetJobLogs returns log entries for a job.
|
||||
func (s *Server) HandleGetJobLogs(w http.ResponseWriter, r *http.Request) {
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
|
||||
logs, err := s.jobs.GetJobLogs(r.Context(), jobID)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job logs")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get job logs")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, logs)
|
||||
}
|
||||
|
||||
// HandleCreateJob manually triggers a job.
|
||||
func (s *Server) HandleCreateJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
user := auth.UserFromContext(ctx)
|
||||
|
||||
var req struct {
|
||||
DefinitionName string `json:"definition_name"`
|
||||
ItemID *string `json:"item_id,omitempty"`
|
||||
ProjectID *string `json:"project_id,omitempty"`
|
||||
ScopeMetadata map[string]any `json:"scope_metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.DefinitionName == "" {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "definition_name is required")
|
||||
return
|
||||
}
|
||||
|
||||
// Look up definition
|
||||
def, err := s.jobs.GetDefinition(ctx, req.DefinitionName)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to look up job definition")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to look up definition")
|
||||
return
|
||||
}
|
||||
if def == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job definition not found: "+req.DefinitionName)
|
||||
return
|
||||
}
|
||||
|
||||
var createdBy *string
|
||||
if user != nil {
|
||||
createdBy = &user.Username
|
||||
}
|
||||
|
||||
job := &db.Job{
|
||||
JobDefinitionID: &def.ID,
|
||||
DefinitionName: def.Name,
|
||||
Priority: def.Priority,
|
||||
ItemID: req.ItemID,
|
||||
ProjectID: req.ProjectID,
|
||||
ScopeMetadata: req.ScopeMetadata,
|
||||
RunnerTags: def.RunnerTags,
|
||||
TimeoutSeconds: def.TimeoutSeconds,
|
||||
MaxRetries: def.MaxRetries,
|
||||
CreatedBy: createdBy,
|
||||
}
|
||||
|
||||
if err := s.jobs.CreateJob(ctx, job); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to create job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to create job")
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.created", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"definition_name": job.DefinitionName,
|
||||
"item_id": job.ItemID,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusCreated, job)
|
||||
}
|
||||
|
||||
// HandleCancelJob cancels a pending or active job.
|
||||
func (s *Server) HandleCancelJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
user := auth.UserFromContext(ctx)
|
||||
|
||||
cancelledBy := "system"
|
||||
if user != nil {
|
||||
cancelledBy = user.Username
|
||||
}
|
||||
|
||||
if err := s.jobs.CancelJob(ctx, jobID, cancelledBy); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "cancel_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.cancelled", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"cancelled_by": cancelledBy,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "cancelled"})
|
||||
}
|
||||
|
||||
// HandleListJobDefinitions returns all loaded job definitions.
|
||||
func (s *Server) HandleListJobDefinitions(w http.ResponseWriter, r *http.Request) {
|
||||
defs, err := s.jobs.ListDefinitions(r.Context())
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list job definitions")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list definitions")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, defs)
|
||||
}
|
||||
|
||||
// HandleGetJobDefinition returns a single job definition by name.
|
||||
func (s *Server) HandleGetJobDefinition(w http.ResponseWriter, r *http.Request) {
|
||||
name := chi.URLParam(r, "name")
|
||||
|
||||
def, err := s.jobs.GetDefinition(r.Context(), name)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to get job definition")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to get definition")
|
||||
return
|
||||
}
|
||||
if def == nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", "Job definition not found")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, def)
|
||||
}
|
||||
|
||||
// HandleReloadJobDefinitions re-reads YAML files from disk and upserts them.
|
||||
func (s *Server) HandleReloadJobDefinitions(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
if s.jobDefsDir == "" {
|
||||
writeError(w, http.StatusBadRequest, "no_directory", "Job definitions directory not configured")
|
||||
return
|
||||
}
|
||||
|
||||
defs, err := loadAndUpsertJobDefs(ctx, s.jobDefsDir, s.jobs)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to reload job definitions")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to reload definitions")
|
||||
return
|
||||
}
|
||||
|
||||
// Update in-memory map
|
||||
s.jobDefs = defs
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"reloaded": len(defs),
|
||||
})
|
||||
}
|
||||
|
||||
// HandleListRunners returns all registered runners (admin).
|
||||
func (s *Server) HandleListRunners(w http.ResponseWriter, r *http.Request) {
|
||||
runners, err := s.jobs.ListRunners(r.Context())
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to list runners")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to list runners")
|
||||
return
|
||||
}
|
||||
|
||||
// Redact token hashes from response
|
||||
type runnerResponse struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
TokenPrefix string `json:"token_prefix"`
|
||||
Tags []string `json:"tags"`
|
||||
Status string `json:"status"`
|
||||
LastHeartbeat *string `json:"last_heartbeat,omitempty"`
|
||||
LastJobID *string `json:"last_job_id,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
resp := make([]runnerResponse, len(runners))
|
||||
for i, runner := range runners {
|
||||
var hb *string
|
||||
if runner.LastHeartbeat != nil {
|
||||
s := runner.LastHeartbeat.Format("2006-01-02T15:04:05Z07:00")
|
||||
hb = &s
|
||||
}
|
||||
resp[i] = runnerResponse{
|
||||
ID: runner.ID,
|
||||
Name: runner.Name,
|
||||
TokenPrefix: runner.TokenPrefix,
|
||||
Tags: runner.Tags,
|
||||
Status: runner.Status,
|
||||
LastHeartbeat: hb,
|
||||
LastJobID: runner.LastJobID,
|
||||
Metadata: runner.Metadata,
|
||||
CreatedAt: runner.CreatedAt.Format("2006-01-02T15:04:05Z07:00"),
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// HandleRegisterRunner creates a new runner and returns the token (admin).
|
||||
func (s *Server) HandleRegisterRunner(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
var req struct {
|
||||
Name string `json:"name"`
|
||||
Tags []string `json:"tags"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
if req.Name == "" {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "name is required")
|
||||
return
|
||||
}
|
||||
if len(req.Tags) == 0 {
|
||||
writeError(w, http.StatusBadRequest, "missing_field", "tags is required (at least one)")
|
||||
return
|
||||
}
|
||||
|
||||
rawToken, tokenHash, tokenPrefix := generateRunnerToken()
|
||||
|
||||
runner := &db.Runner{
|
||||
Name: req.Name,
|
||||
TokenHash: tokenHash,
|
||||
TokenPrefix: tokenPrefix,
|
||||
Tags: req.Tags,
|
||||
Metadata: req.Metadata,
|
||||
}
|
||||
|
||||
if err := s.jobs.RegisterRunner(ctx, runner); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to register runner")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to register runner")
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("runner.online", mustMarshal(map[string]any{
|
||||
"runner_id": runner.ID,
|
||||
"name": runner.Name,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusCreated, map[string]any{
|
||||
"id": runner.ID,
|
||||
"name": runner.Name,
|
||||
"token": rawToken,
|
||||
"tags": runner.Tags,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleDeleteRunner removes a runner (admin).
|
||||
func (s *Server) HandleDeleteRunner(w http.ResponseWriter, r *http.Request) {
|
||||
runnerID := chi.URLParam(r, "runnerID")
|
||||
|
||||
if err := s.jobs.DeleteRunner(r.Context(), runnerID); err != nil {
|
||||
writeError(w, http.StatusNotFound, "not_found", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// triggerJobs creates jobs for all enabled definitions matching the trigger type.
|
||||
// It applies trigger filters (e.g. item_type) before creating each job.
|
||||
func (s *Server) triggerJobs(ctx context.Context, triggerType string, itemID string, item *db.Item) {
|
||||
defs, err := s.jobs.GetDefinitionsByTrigger(ctx, triggerType)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Str("trigger", triggerType).Msg("failed to get job definitions for trigger")
|
||||
return
|
||||
}
|
||||
|
||||
for _, def := range defs {
|
||||
// Apply trigger filter (e.g. item_type == "assembly")
|
||||
if def.Definition != nil {
|
||||
if triggerCfg, ok := def.Definition["trigger"].(map[string]any); ok {
|
||||
if filterCfg, ok := triggerCfg["filter"].(map[string]any); ok {
|
||||
if reqType, ok := filterCfg["item_type"].(string); ok && item != nil {
|
||||
if item.ItemType != reqType {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
job := &db.Job{
|
||||
JobDefinitionID: &def.ID,
|
||||
DefinitionName: def.Name,
|
||||
Priority: def.Priority,
|
||||
ItemID: &itemID,
|
||||
RunnerTags: def.RunnerTags,
|
||||
TimeoutSeconds: def.TimeoutSeconds,
|
||||
MaxRetries: def.MaxRetries,
|
||||
}
|
||||
|
||||
if err := s.jobs.CreateJob(ctx, job); err != nil {
|
||||
s.logger.Error().Err(err).Str("definition", def.Name).Msg("failed to create triggered job")
|
||||
continue
|
||||
}
|
||||
|
||||
s.broker.Publish("job.created", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"definition_name": def.Name,
|
||||
"trigger": triggerType,
|
||||
"item_id": itemID,
|
||||
}))
|
||||
|
||||
s.logger.Info().
|
||||
Str("job_id", job.ID).
|
||||
Str("definition", def.Name).
|
||||
Str("trigger", triggerType).
|
||||
Str("item_id", itemID).
|
||||
Msg("triggered job")
|
||||
}
|
||||
}
|
||||
340
internal/api/job_handlers_test.go
Normal file
340
internal/api/job_handlers_test.go
Normal file
@@ -0,0 +1,340 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// newJobTestServer builds a Server wired to the shared test database pool,
// with a no-op logger, an in-process broker, and empty schema/module
// configuration — just enough to exercise the job and runner handlers.
func newJobTestServer(t *testing.T) *Server {
	t.Helper()
	pool := testutil.MustConnectTestPool(t)
	database := db.NewFromPool(pool)
	broker := NewBroker(zerolog.Nop())
	state := NewServerState(zerolog.Nop(), nil, broker)
	return NewServer(
		zerolog.Nop(),
		database,
		map[string]*schema.Schema{},
		"",
		// Remaining nil/empty args are optional subsystems these tests do
		// not use — presumably auth/storage/etc.; see NewServer for the
		// exact parameter meanings.
		nil, nil, nil, nil, nil,
		broker, state,
		nil, "",
		modules.NewRegistry(), nil,
	)
}
|
||||
|
||||
// newJobRouter mounts the job, job-definition, and runner routes used by
// these tests, without any auth middleware, mirroring the production paths.
func newJobRouter(s *Server) http.Handler {
	r := chi.NewRouter()
	r.Route("/api/jobs", func(r chi.Router) {
		r.Get("/", s.HandleListJobs)
		r.Get("/{jobID}", s.HandleGetJob)
		r.Get("/{jobID}/logs", s.HandleGetJobLogs)
		r.Post("/", s.HandleCreateJob)
		r.Post("/{jobID}/cancel", s.HandleCancelJob)
	})
	r.Route("/api/job-definitions", func(r chi.Router) {
		r.Get("/", s.HandleListJobDefinitions)
		r.Get("/{name}", s.HandleGetJobDefinition)
	})
	r.Route("/api/runners", func(r chi.Router) {
		r.Get("/", s.HandleListRunners)
		r.Post("/", s.HandleRegisterRunner)
		r.Delete("/{runnerID}", s.HandleDeleteRunner)
	})
	return r
}
|
||||
|
||||
// seedJobDefinition upserts a minimal manual-trigger job definition named
// "test-validate" that the job-handler tests create jobs against.
func seedJobDefinition(t *testing.T, s *Server) *db.JobDefinitionRecord {
	t.Helper()
	rec := &db.JobDefinitionRecord{
		Name:           "test-validate",
		Version:        1,
		TriggerType:    "manual",
		ScopeType:      "item",
		ComputeType:    "validate",
		RunnerTags:     []string{"create"},
		TimeoutSeconds: 300,
		MaxRetries:     1,
		Priority:       100,
		Definition:     map[string]any{"compute": map[string]any{"command": "create-validate"}},
		Enabled:        true,
	}
	if err := s.jobs.UpsertDefinition(context.Background(), rec); err != nil {
		t.Fatalf("seeding job definition: %v", err)
	}
	return rec
}
|
||||
|
||||
func TestHandleListJobDefinitions(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/job-definitions", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var defs []map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &defs)
|
||||
if len(defs) == 0 {
|
||||
t.Error("expected at least one definition")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleGetJobDefinition(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/job-definitions/test-validate", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var def map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &def)
|
||||
if def["name"] != "test-validate" {
|
||||
t.Errorf("expected name test-validate, got %v", def["name"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCreateAndGetJob(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf("create: expected 201, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var job map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &job)
|
||||
jobID := job["ID"].(string)
|
||||
if jobID == "" {
|
||||
t.Fatal("job ID is empty")
|
||||
}
|
||||
|
||||
// Get the job
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs/"+jobID, nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("get: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleCancelJob(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
var job map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &job)
|
||||
jobID := job["ID"].(string)
|
||||
|
||||
// Cancel the job
|
||||
req2 := httptest.NewRequest("POST", "/api/jobs/"+jobID+"/cancel", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("cancel: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListJobs(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
// List jobs
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("list: expected 200, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
|
||||
var jobs []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &jobs)
|
||||
if len(jobs) == 0 {
|
||||
t.Error("expected at least one job")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleListJobs_FilterByStatus(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
seedJobDefinition(t, s)
|
||||
|
||||
// Create a job
|
||||
body := `{"definition_name": "test-validate"}`
|
||||
req := httptest.NewRequest("POST", "/api/jobs", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
// Filter by pending
|
||||
req2 := httptest.NewRequest("GET", "/api/jobs?status=pending", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("expected 200, got %d", w2.Code)
|
||||
}
|
||||
|
||||
var jobs []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &jobs)
|
||||
if len(jobs) == 0 {
|
||||
t.Error("expected pending jobs")
|
||||
}
|
||||
|
||||
// Filter by completed (should be empty)
|
||||
req3 := httptest.NewRequest("GET", "/api/jobs?status=completed", nil)
|
||||
w3 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w3, req3)
|
||||
|
||||
var completedJobs []map[string]any
|
||||
json.Unmarshal(w3.Body.Bytes(), &completedJobs)
|
||||
if len(completedJobs) != 0 {
|
||||
t.Errorf("expected no completed jobs, got %d", len(completedJobs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleRegisterAndListRunners(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
// Register a runner
|
||||
body := `{"name": "test-runner-1", "tags": ["create", "linux"]}`
|
||||
req := httptest.NewRequest("POST", "/api/runners", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusCreated {
|
||||
t.Fatalf("register: expected 201, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if resp["token"] == nil || resp["token"] == "" {
|
||||
t.Error("expected a token in response")
|
||||
}
|
||||
if !strings.HasPrefix(resp["token"].(string), "silo_runner_") {
|
||||
t.Errorf("expected token to start with silo_runner_, got %s", resp["token"])
|
||||
}
|
||||
|
||||
// List runners
|
||||
req2 := httptest.NewRequest("GET", "/api/runners", nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusOK {
|
||||
t.Fatalf("list: expected 200, got %d", w2.Code)
|
||||
}
|
||||
|
||||
var runners []map[string]any
|
||||
json.Unmarshal(w2.Body.Bytes(), &runners)
|
||||
if len(runners) == 0 {
|
||||
t.Error("expected at least one runner")
|
||||
}
|
||||
// Token hash should not be exposed
|
||||
for _, runner := range runners {
|
||||
if runner["token_hash"] != nil {
|
||||
t.Error("token_hash should not be in response")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleDeleteRunner(t *testing.T) {
|
||||
s := newJobTestServer(t)
|
||||
r := newJobRouter(s)
|
||||
|
||||
// Register a runner
|
||||
body := `{"name": "test-runner-delete", "tags": ["create"]}`
|
||||
req := httptest.NewRequest("POST", "/api/runners", strings.NewReader(body))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
var resp map[string]any
|
||||
json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
runnerID := resp["id"].(string)
|
||||
|
||||
// Delete the runner
|
||||
req2 := httptest.NewRequest("DELETE", "/api/runners/"+runnerID, nil)
|
||||
w2 := httptest.NewRecorder()
|
||||
r.ServeHTTP(w2, req2)
|
||||
|
||||
if w2.Code != http.StatusNoContent {
|
||||
t.Fatalf("delete: expected 204, got %d: %s", w2.Code, w2.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateRunnerToken(t *testing.T) {
|
||||
raw, hash, prefix := generateRunnerToken()
|
||||
|
||||
if !strings.HasPrefix(raw, "silo_runner_") {
|
||||
t.Errorf("raw token should start with silo_runner_, got %s", raw[:20])
|
||||
}
|
||||
if len(hash) != 64 {
|
||||
t.Errorf("hash should be 64 hex chars, got %d", len(hash))
|
||||
}
|
||||
if len(prefix) != 20 {
|
||||
t.Errorf("prefix should be 20 chars, got %d: %s", len(prefix), prefix)
|
||||
}
|
||||
|
||||
// Two tokens should be different
|
||||
raw2, _, _ := generateRunnerToken()
|
||||
if raw == raw2 {
|
||||
t.Error("two generated tokens should be different")
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,8 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -148,6 +150,55 @@ func (s *Server) RequireWritable(next http.Handler) http.Handler {
|
||||
})
|
||||
}
|
||||
|
||||
// RequireRunnerAuth extracts and validates a runner token from the
|
||||
// Authorization header. On success, injects RunnerIdentity into context
|
||||
// and updates the runner's heartbeat.
|
||||
func (s *Server) RequireRunnerAuth(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
token := extractBearerToken(r)
|
||||
if token == "" || !strings.HasPrefix(token, "silo_runner_") {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner token required")
|
||||
return
|
||||
}
|
||||
|
||||
hash := sha256.Sum256([]byte(token))
|
||||
tokenHash := hex.EncodeToString(hash[:])
|
||||
|
||||
runner, err := s.jobs.GetRunnerByToken(r.Context(), tokenHash)
|
||||
if err != nil || runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Invalid runner token")
|
||||
return
|
||||
}
|
||||
|
||||
// Update heartbeat on every authenticated request
|
||||
_ = s.jobs.Heartbeat(r.Context(), runner.ID)
|
||||
|
||||
identity := &auth.RunnerIdentity{
|
||||
ID: runner.ID,
|
||||
Name: runner.Name,
|
||||
Tags: runner.Tags,
|
||||
}
|
||||
ctx := auth.ContextWithRunner(r.Context(), identity)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
// RequireModule returns middleware that rejects requests with 404 when
|
||||
// the named module is not enabled.
|
||||
func (s *Server) RequireModule(id string) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.modules.IsEnabled(id) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
w.Write([]byte(`{"error":"module '` + id + `' is not enabled"}`))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func extractBearerToken(r *http.Request) string {
|
||||
h := r.Header.Get("Authorization")
|
||||
if strings.HasPrefix(h, "Bearer ") {
|
||||
|
||||
90
internal/api/ods_handlers_test.go
Normal file
90
internal/api/ods_handlers_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
)
|
||||
|
||||
// newODSRouter mounts the ODS export/import/template routes used by these
// tests, without auth middleware.
func newODSRouter(s *Server) http.Handler {
	r := chi.NewRouter()
	r.Get("/api/items/export.ods", s.HandleExportODS)
	r.Get("/api/items/template.ods", s.HandleODSTemplate)
	r.Post("/api/items/import.ods", s.HandleImportODS)
	r.Get("/api/projects/{code}/sheet.ods", s.HandleProjectSheetODS)
	return r
}
|
||||
|
||||
// TestHandleExportODS exports all items as ODS and sanity-checks the status,
// content type, and ZIP container magic bytes.
func TestHandleExportODS(t *testing.T) {
	s := newTestServerWithSchemas(t)
	router := newODSRouter(s)

	createItemDirect(t, s, "ODS-001", "ods export item", nil)

	req := httptest.NewRequest("GET", "/api/items/export.ods", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	ct := w.Header().Get("Content-Type")
	if !strings.Contains(ct, "application/vnd.oasis.opendocument.spreadsheet") {
		t.Errorf("content-type: got %q, want ODS type", ct)
	}

	// ODS is a ZIP file — first 2 bytes should be PK
	body := w.Body.Bytes()
	if len(body) < 2 || body[0] != 'P' || body[1] != 'K' {
		t.Error("response body does not start with PK (ZIP magic)")
	}
}
|
||||
|
||||
// TestHandleODSTemplate requests an empty ODS template for a named schema
// and verifies status and content type.
func TestHandleODSTemplate(t *testing.T) {
	s := newTestServerWithSchemas(t)
	router := newODSRouter(s)

	req := httptest.NewRequest("GET", "/api/items/template.ods?schema=kindred-rd", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	ct := w.Header().Get("Content-Type")
	if !strings.Contains(ct, "application/vnd.oasis.opendocument.spreadsheet") {
		t.Errorf("content-type: got %q, want ODS type", ct)
	}
}
|
||||
|
||||
func TestHandleProjectSheetODS(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newODSRouter(s)
|
||||
|
||||
// Create project and item
|
||||
ctx := httptest.NewRequest("GET", "/", nil).Context()
|
||||
proj := &db.Project{Code: "ODSPR", Name: "ODS Project"}
|
||||
s.projects.Create(ctx, proj)
|
||||
createItemDirect(t, s, "ODSPR-001", "project sheet item", nil)
|
||||
item, _ := s.items.GetByPartNumber(ctx, "ODSPR-001")
|
||||
s.projects.AddItemToProject(ctx, item.ID, proj.ID)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/projects/ODSPR/sheet.ods", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
ct := w.Header().Get("Content-Type")
|
||||
if !strings.Contains(ct, "application/vnd.oasis.opendocument.spreadsheet") {
|
||||
t.Errorf("content-type: got %q, want ODS type", ct)
|
||||
}
|
||||
}
|
||||
222
internal/api/revision_handlers_test.go
Normal file
222
internal/api/revision_handlers_test.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// newRevisionRouter mounts the per-item revision routes used by these tests,
// without auth middleware.
func newRevisionRouter(s *Server) http.Handler {
	r := chi.NewRouter()
	r.Route("/api/items/{partNumber}", func(r chi.Router) {
		r.Get("/revisions", s.HandleListRevisions)
		// "compare" must be registered before "{revision}" so it is not
		// swallowed by the wildcard segment.
		r.Get("/revisions/compare", s.HandleCompareRevisions)
		r.Get("/revisions/{revision}", s.HandleGetRevision)
		r.Post("/revisions", s.HandleCreateRevision)
		r.Patch("/revisions/{revision}", s.HandleUpdateRevision)
		r.Post("/revisions/{revision}/rollback", s.HandleRollbackRevision)
	})
	return r
}
|
||||
|
||||
// TestHandleListRevisions verifies a freshly created item has exactly one
// (initial) revision.
func TestHandleListRevisions(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REV-API-001", "revision list", nil)

	req := httptest.NewRequest("GET", "/api/items/REV-API-001/revisions", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var revisions []RevisionResponse
	if err := json.Unmarshal(w.Body.Bytes(), &revisions); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if len(revisions) != 1 {
		t.Errorf("expected 1 revision (initial), got %d", len(revisions))
	}
}
|
||||
|
||||
// TestHandleListRevisionsNotFound verifies listing revisions of an unknown
// part number yields 404.
func TestHandleListRevisionsNotFound(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	req := httptest.NewRequest("GET", "/api/items/NOEXIST/revisions", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusNotFound {
		t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
	}
}
|
||||
|
||||
// TestHandleGetRevision fetches revision 1 of a new item and checks the
// revision number round-trips.
func TestHandleGetRevision(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REVGET-001", "get revision", nil)

	req := httptest.NewRequest("GET", "/api/items/REVGET-001/revisions/1", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var rev RevisionResponse
	if err := json.Unmarshal(w.Body.Bytes(), &rev); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if rev.RevisionNumber != 1 {
		t.Errorf("revision_number: got %d, want 1", rev.RevisionNumber)
	}
}
|
||||
|
||||
// TestHandleGetRevisionNotFound verifies fetching a nonexistent revision
// number of an existing item yields 404.
func TestHandleGetRevisionNotFound(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REVNF-001", "rev not found", nil)

	req := httptest.NewRequest("GET", "/api/items/REVNF-001/revisions/99", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusNotFound {
		t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
	}
}
|
||||
|
||||
// TestHandleCreateRevision posts a new revision and expects it to become
// revision 2 (after the implicit initial revision).
func TestHandleCreateRevision(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REVCR-001", "create revision", nil)

	body := `{"properties":{"material":"steel"},"comment":"added material"}`
	req := authRequest(httptest.NewRequest("POST", "/api/items/REVCR-001/revisions", strings.NewReader(body)))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusCreated {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusCreated, w.Body.String())
	}

	var rev RevisionResponse
	if err := json.Unmarshal(w.Body.Bytes(), &rev); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if rev.RevisionNumber != 2 {
		t.Errorf("revision_number: got %d, want 2", rev.RevisionNumber)
	}
}
|
||||
|
||||
// TestHandleUpdateRevision patches revision 1's status and labels and checks
// both are reflected in the response.
func TestHandleUpdateRevision(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REVUP-001", "update revision", nil)

	body := `{"status":"released","labels":["production"]}`
	req := authRequest(httptest.NewRequest("PATCH", "/api/items/REVUP-001/revisions/1", strings.NewReader(body)))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var rev RevisionResponse
	if err := json.Unmarshal(w.Body.Bytes(), &rev); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if rev.Status != "released" {
		t.Errorf("status: got %q, want %q", rev.Status, "released")
	}
	if len(rev.Labels) != 1 || rev.Labels[0] != "production" {
		t.Errorf("labels: got %v, want [production]", rev.Labels)
	}
}
|
||||
|
||||
// TestHandleCompareRevisions creates two revisions with different properties
// and diffs them via /revisions/compare?from=1&to=2.
func TestHandleCompareRevisions(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	// Create item with properties, then create second revision with changed properties
	cost := 10.0
	createItemDirect(t, s, "REVCMP-001", "compare revisions", &cost)

	body := `{"properties":{"standard_cost":20,"material":"aluminum"},"comment":"updated cost"}`
	req := authRequest(httptest.NewRequest("POST", "/api/items/REVCMP-001/revisions", strings.NewReader(body)))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusCreated {
		t.Fatalf("create rev 2: status %d; body: %s", w.Code, w.Body.String())
	}

	// Compare rev 1 vs rev 2
	req = httptest.NewRequest("GET", "/api/items/REVCMP-001/revisions/compare?from=1&to=2", nil)
	w = httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var diff RevisionDiffResponse
	if err := json.Unmarshal(w.Body.Bytes(), &diff); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if diff.FromRevision != 1 || diff.ToRevision != 2 {
		t.Errorf("revisions: got from=%d to=%d, want from=1 to=2", diff.FromRevision, diff.ToRevision)
	}
}
|
||||
|
||||
// TestHandleRollbackRevision creates a second revision and then rolls back
// to revision 1, verifying the rollback is materialized as a brand-new
// revision (number 3) rather than reusing an old one.
func TestHandleRollbackRevision(t *testing.T) {
	s := newTestServer(t)
	router := newRevisionRouter(s)

	createItemDirect(t, s, "REVRB-001", "rollback test", nil)

	// Create rev 2.
	body := `{"properties":{"version":"v2"},"comment":"version 2"}`
	req := authRequest(httptest.NewRequest("POST", "/api/items/REVRB-001/revisions", strings.NewReader(body)))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)
	if w.Code != http.StatusCreated {
		t.Fatalf("create rev 2: status %d; body: %s", w.Code, w.Body.String())
	}

	// Rollback to rev 1 — should create rev 3.
	body = `{"comment":"rolling back"}`
	req = authRequest(httptest.NewRequest("POST", "/api/items/REVRB-001/revisions/1/rollback", strings.NewReader(body)))
	req.Header.Set("Content-Type", "application/json")
	w = httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusCreated {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusCreated, w.Body.String())
	}

	var rev RevisionResponse
	if err := json.Unmarshal(w.Body.Bytes(), &rev); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if rev.RevisionNumber != 3 {
		t.Errorf("revision_number: got %d, want 3", rev.RevisionNumber)
	}
}
|
||||
@@ -58,6 +58,7 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Get("/auth/callback", server.HandleOIDCCallback)
|
||||
|
||||
// Public API endpoints (no auth required)
|
||||
r.Get("/api/modules", server.HandleGetModules)
|
||||
r.Get("/api/auth/config", server.HandleAuthConfig)
|
||||
|
||||
// API routes (require auth, no CSRF — token auth instead)
|
||||
@@ -101,6 +102,7 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
|
||||
// Projects (read: viewer, write: editor)
|
||||
r.Route("/projects", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("projects"))
|
||||
r.Get("/", server.HandleListProjects)
|
||||
r.Get("/{code}", server.HandleGetProject)
|
||||
r.Get("/{code}/items", server.HandleGetProjectItems)
|
||||
@@ -150,6 +152,21 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Get("/bom/export.csv", server.HandleExportBOMCSV)
|
||||
r.Get("/bom/export.ods", server.HandleExportBOMODS)
|
||||
|
||||
// DAG (gated by dag module)
|
||||
r.Route("/dag", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("dag"))
|
||||
r.Get("/", server.HandleGetDAG)
|
||||
r.Get("/forward-cone/{nodeKey}", server.HandleGetForwardCone)
|
||||
r.Get("/dirty", server.HandleGetDirtySubgraph)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
r.Put("/", server.HandleSyncDAG)
|
||||
r.Post("/mark-dirty/{nodeKey}", server.HandleMarkDirty)
|
||||
})
|
||||
})
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
@@ -175,12 +192,14 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
|
||||
// Audit (read-only, viewer role)
|
||||
r.Route("/audit", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("audit"))
|
||||
r.Get("/completeness", server.HandleAuditCompleteness)
|
||||
r.Get("/completeness/{partNumber}", server.HandleAuditItemDetail)
|
||||
})
|
||||
|
||||
// Integrations (read: viewer, write: editor)
|
||||
r.Route("/integrations/odoo", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("odoo"))
|
||||
r.Get("/config", server.HandleGetOdooConfig)
|
||||
r.Get("/sync-log", server.HandleGetOdooSyncLog)
|
||||
|
||||
@@ -201,12 +220,71 @@ func NewRouter(server *Server, logger zerolog.Logger) http.Handler {
|
||||
r.Post("/sheets/diff", server.HandleSheetDiff)
|
||||
})
|
||||
|
||||
// Jobs (read: viewer, write: editor)
|
||||
r.Route("/jobs", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("jobs"))
|
||||
r.Get("/", server.HandleListJobs)
|
||||
r.Get("/{jobID}", server.HandleGetJob)
|
||||
r.Get("/{jobID}/logs", server.HandleGetJobLogs)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
r.Post("/", server.HandleCreateJob)
|
||||
r.Post("/{jobID}/cancel", server.HandleCancelJob)
|
||||
})
|
||||
})
|
||||
|
||||
// Job definitions (read: viewer, reload: admin)
|
||||
r.Route("/job-definitions", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("jobs"))
|
||||
r.Get("/", server.HandleListJobDefinitions)
|
||||
r.Get("/{name}", server.HandleGetJobDefinition)
|
||||
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireRole(auth.RoleAdmin))
|
||||
r.Post("/reload", server.HandleReloadJobDefinitions)
|
||||
})
|
||||
})
|
||||
|
||||
// Runners (admin)
|
||||
r.Route("/runners", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("jobs"))
|
||||
r.Use(server.RequireRole(auth.RoleAdmin))
|
||||
r.Get("/", server.HandleListRunners)
|
||||
r.Post("/", server.HandleRegisterRunner)
|
||||
r.Delete("/{runnerID}", server.HandleDeleteRunner)
|
||||
})
|
||||
|
||||
// Part number generation (editor)
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(server.RequireWritable)
|
||||
r.Use(server.RequireRole(auth.RoleEditor))
|
||||
r.Post("/generate-part-number", server.HandleGeneratePartNumber)
|
||||
})
|
||||
|
||||
// Admin settings (admin only)
|
||||
r.Route("/admin/settings", func(r chi.Router) {
|
||||
r.Use(server.RequireRole(auth.RoleAdmin))
|
||||
r.Get("/", server.HandleGetAllSettings)
|
||||
r.Get("/{module}", server.HandleGetModuleSettings)
|
||||
r.Put("/{module}", server.HandleUpdateModuleSettings)
|
||||
r.Post("/{module}/test", server.HandleTestModuleConnectivity)
|
||||
})
|
||||
})
|
||||
|
||||
// Runner-facing API (runner token auth, not user auth)
|
||||
r.Route("/api/runner", func(r chi.Router) {
|
||||
r.Use(server.RequireModule("jobs"))
|
||||
r.Use(server.RequireRunnerAuth)
|
||||
r.Post("/heartbeat", server.HandleRunnerHeartbeat)
|
||||
r.Post("/claim", server.HandleRunnerClaim)
|
||||
r.Post("/jobs/{jobID}/start", server.HandleRunnerStartJob)
|
||||
r.Put("/jobs/{jobID}/progress", server.HandleRunnerUpdateProgress)
|
||||
r.Post("/jobs/{jobID}/complete", server.HandleRunnerCompleteJob)
|
||||
r.Post("/jobs/{jobID}/fail", server.HandleRunnerFailJob)
|
||||
r.Post("/jobs/{jobID}/log", server.HandleRunnerAppendLog)
|
||||
r.Put("/jobs/{jobID}/dag", server.HandleRunnerSyncDAG)
|
||||
})
|
||||
|
||||
// React SPA — serve from web/dist at root, fallback to index.html
|
||||
|
||||
385
internal/api/runner_handlers.go
Normal file
385
internal/api/runner_handlers.go
Normal file
@@ -0,0 +1,385 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/jobdef"
|
||||
)
|
||||
|
||||
// HandleRunnerHeartbeat updates the runner's heartbeat timestamp.
|
||||
func (s *Server) HandleRunnerHeartbeat(w http.ResponseWriter, r *http.Request) {
|
||||
runner := auth.RunnerFromContext(r.Context())
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
// Heartbeat already updated by RequireRunnerAuth middleware
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
// HandleRunnerClaim claims the next available job matching the runner's tags.
|
||||
func (s *Server) HandleRunnerClaim(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
job, err := s.jobs.ClaimJob(ctx, runner.ID, runner.Tags)
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to claim job")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to claim job")
|
||||
return
|
||||
}
|
||||
if job == nil {
|
||||
writeJSON(w, http.StatusNoContent, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Look up the full definition to send to the runner
|
||||
var defPayload map[string]any
|
||||
if job.JobDefinitionID != nil {
|
||||
rec, err := s.jobs.GetDefinitionByID(ctx, *job.JobDefinitionID)
|
||||
if err == nil && rec != nil {
|
||||
defPayload = rec.Definition
|
||||
}
|
||||
}
|
||||
|
||||
s.broker.Publish("job.claimed", mustMarshal(map[string]any{
|
||||
"job_id": job.ID,
|
||||
"runner_id": runner.ID,
|
||||
"runner": runner.Name,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"job": job,
|
||||
"definition": defPayload,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleRunnerStartJob transitions a claimed job to running.
|
||||
func (s *Server) HandleRunnerStartJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
if err := s.jobs.StartJob(ctx, jobID, runner.ID); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "start_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "running"})
|
||||
}
|
||||
|
||||
// HandleRunnerUpdateProgress updates a running job's progress.
|
||||
func (s *Server) HandleRunnerUpdateProgress(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Progress int `json:"progress"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.UpdateProgress(ctx, jobID, runner.ID, req.Progress, req.Message); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "update_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.progress", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"progress": req.Progress,
|
||||
"message": req.Message,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
// HandleRunnerCompleteJob marks a job as completed.
|
||||
func (s *Server) HandleRunnerCompleteJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Result map[string]any `json:"result,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.CompleteJob(ctx, jobID, runner.ID, req.Result); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "complete_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.completed", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"runner_id": runner.ID,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "completed"})
|
||||
}
|
||||
|
||||
// HandleRunnerFailJob marks a job as failed.
|
||||
func (s *Server) HandleRunnerFailJob(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.jobs.FailJob(ctx, jobID, runner.ID, req.Error); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "fail_failed", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
s.broker.Publish("job.failed", mustMarshal(map[string]any{
|
||||
"job_id": jobID,
|
||||
"runner_id": runner.ID,
|
||||
"error": req.Error,
|
||||
}))
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "failed"})
|
||||
}
|
||||
|
||||
// HandleRunnerAppendLog appends a log entry to a job.
|
||||
func (s *Server) HandleRunnerAppendLog(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
runner := auth.RunnerFromContext(ctx)
|
||||
if runner == nil {
|
||||
writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
|
||||
return
|
||||
}
|
||||
|
||||
jobID := chi.URLParam(r, "jobID")
|
||||
var req struct {
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.Level == "" {
|
||||
req.Level = "info"
|
||||
}
|
||||
|
||||
entry := &db.JobLogEntry{
|
||||
JobID: jobID,
|
||||
Level: req.Level,
|
||||
Message: req.Message,
|
||||
Metadata: req.Metadata,
|
||||
}
|
||||
if err := s.jobs.AppendLog(ctx, entry); err != nil {
|
||||
s.logger.Error().Err(err).Msg("failed to append job log")
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "Failed to append log")
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusCreated, entry)
|
||||
}
|
||||
|
||||
// HandleRunnerSyncDAG allows a runner to push DAG results for the item
// attached to a job: nodes are synced via SyncFeatureTree, then — if edges
// are supplied — the item/revision's existing edges are dropped and
// recreated from the request.
func (s *Server) HandleRunnerSyncDAG(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	runner := auth.RunnerFromContext(ctx)
	if runner == nil {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Runner identity required")
		return
	}

	jobID := chi.URLParam(r, "jobID")

	// Get the job to find the item it targets.
	job, err := s.jobs.GetJob(ctx, jobID)
	if err != nil || job == nil {
		writeError(w, http.StatusNotFound, "not_found", "Job not found")
		return
	}
	if job.ItemID == nil {
		writeError(w, http.StatusBadRequest, "no_item", "Job has no associated item")
		return
	}

	// Reuse the request shape of the user-facing DAG sync endpoint.
	var req dagSyncRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_body", "Invalid JSON body")
		return
	}

	// Revision 0 means "current": resolve it from the item record.
	if req.RevisionNumber == 0 {
		item, err := s.items.GetByID(ctx, *job.ItemID)
		if err != nil || item == nil {
			writeError(w, http.StatusNotFound, "not_found", "Item not found")
			return
		}
		req.RevisionNumber = item.CurrentRevision
	}

	// Convert request nodes to db.DAGNode, defaulting the validation state
	// to "clean" when the runner omits it.
	nodes := make([]db.DAGNode, len(req.Nodes))
	for i, n := range req.Nodes {
		state := n.ValidationState
		if state == "" {
			state = "clean"
		}
		nodes[i] = db.DAGNode{
			NodeKey:         n.NodeKey,
			NodeType:        n.NodeType,
			PropertiesHash:  n.PropertiesHash,
			ValidationState: state,
			Metadata:        n.Metadata,
		}
	}

	if err := s.dag.SyncFeatureTree(ctx, *job.ItemID, req.RevisionNumber, nodes, nil); err != nil {
		s.logger.Error().Err(err).Msg("failed to sync DAG from runner")
		writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG")
		return
	}

	// Build key→ID map for edge resolution.
	// NOTE(review): the conversion loop above never assigns node IDs, so this
	// map only has useful values if SyncFeatureTree populates IDs into the
	// nodes slice in place — confirm; otherwise every edge gets empty IDs.
	keyToID := make(map[string]string, len(nodes))
	for _, n := range nodes {
		keyToID[n.NodeKey] = n.ID
	}

	if len(req.Edges) > 0 {
		// Replace-all semantics: drop the old edge set, then recreate it.
		if err := s.dag.DeleteEdgesForItem(ctx, *job.ItemID, req.RevisionNumber); err != nil {
			s.logger.Error().Err(err).Msg("failed to delete old edges")
			writeError(w, http.StatusInternalServerError, "internal_error", "Failed to sync DAG edges")
			return
		}
		for _, e := range req.Edges {
			// Silently skip edges whose endpoints were not among the synced nodes.
			sourceID, ok := keyToID[e.SourceKey]
			if !ok {
				continue
			}
			targetID, ok := keyToID[e.TargetKey]
			if !ok {
				continue
			}
			edgeType := e.EdgeType
			if edgeType == "" {
				edgeType = "depends_on"
			}
			edge := &db.DAGEdge{
				SourceNodeID: sourceID,
				TargetNodeID: targetID,
				EdgeType:     edgeType,
				Metadata:     e.Metadata,
			}
			// Individual edge failures are logged but do not abort the sync.
			if err := s.dag.CreateEdge(ctx, edge); err != nil {
				s.logger.Error().Err(err).Msg("failed to create edge from runner")
			}
		}
	}

	s.broker.Publish("dag.updated", mustMarshal(map[string]any{
		"item_id":    *job.ItemID,
		"job_id":     jobID,
		"runner":     runner.Name,
		"node_count": len(req.Nodes),
		"edge_count": len(req.Edges),
	}))

	writeJSON(w, http.StatusOK, map[string]any{
		"synced":     true,
		"node_count": len(req.Nodes),
		"edge_count": len(req.Edges),
	})
}
|
||||
|
||||
// generateRunnerToken creates a new runner token. Returns raw token, hash, and prefix.
|
||||
func generateRunnerToken() (raw, hash, prefix string) {
|
||||
rawBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(rawBytes); err != nil {
|
||||
panic(fmt.Sprintf("generating random bytes: %v", err))
|
||||
}
|
||||
|
||||
raw = "silo_runner_" + hex.EncodeToString(rawBytes)
|
||||
|
||||
h := sha256.Sum256([]byte(raw))
|
||||
hash = hex.EncodeToString(h[:])
|
||||
|
||||
prefix = raw[:20] // "silo_runner_" + first 8 hex chars
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// loadAndUpsertJobDefs loads YAML definitions from a directory and upserts them into the database.
|
||||
func loadAndUpsertJobDefs(ctx context.Context, dir string, repo *db.JobRepository) (map[string]*jobdef.Definition, error) {
|
||||
defs, err := jobdef.LoadAll(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading job definitions: %w", err)
|
||||
}
|
||||
|
||||
for _, def := range defs {
|
||||
defJSON, _ := json.Marshal(def)
|
||||
var defMap map[string]any
|
||||
json.Unmarshal(defJSON, &defMap)
|
||||
|
||||
rec := &db.JobDefinitionRecord{
|
||||
Name: def.Name,
|
||||
Version: def.Version,
|
||||
TriggerType: def.Trigger.Type,
|
||||
ScopeType: def.Scope.Type,
|
||||
ComputeType: def.Compute.Type,
|
||||
RunnerTags: def.Runner.Tags,
|
||||
TimeoutSeconds: def.Timeout,
|
||||
MaxRetries: def.MaxRetries,
|
||||
Priority: def.Priority,
|
||||
Definition: defMap,
|
||||
Enabled: true,
|
||||
}
|
||||
if err := repo.UpsertDefinition(ctx, rec); err != nil {
|
||||
return nil, fmt.Errorf("upserting definition %s: %w", def.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return defs, nil
|
||||
}
|
||||
100
internal/api/schema_handlers_test.go
Normal file
100
internal/api/schema_handlers_test.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
func newSchemaRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Get("/api/schemas", s.HandleListSchemas)
|
||||
r.Get("/api/schemas/{name}", s.HandleGetSchema)
|
||||
r.Get("/api/schemas/{name}/form", s.HandleGetFormDescriptor)
|
||||
return r
|
||||
}
|
||||
|
||||
// TestHandleListSchemas verifies GET /api/schemas returns 200 and a
// non-empty JSON array of schemas.
func TestHandleListSchemas(t *testing.T) {
	s := newTestServerWithSchemas(t)
	router := newSchemaRouter(s)

	req := httptest.NewRequest("GET", "/api/schemas", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var schemas []map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &schemas); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if len(schemas) == 0 {
		t.Error("expected at least 1 schema")
	}
}
|
||||
|
||||
// TestHandleGetSchema verifies fetching a schema by name returns 200 and
// that the payload's "name" field echoes the requested name.
func TestHandleGetSchema(t *testing.T) {
	s := newTestServerWithSchemas(t)
	router := newSchemaRouter(s)

	req := httptest.NewRequest("GET", "/api/schemas/kindred-rd", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var schema map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &schema); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if schema["name"] != "kindred-rd" {
		t.Errorf("name: got %v, want %q", schema["name"], "kindred-rd")
	}
}
|
||||
|
||||
func TestHandleGetSchemaNotFound(t *testing.T) {
|
||||
s := newTestServerWithSchemas(t)
|
||||
router := newSchemaRouter(s)
|
||||
|
||||
req := httptest.NewRequest("GET", "/api/schemas/nonexistent", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHandleGetFormDescriptor verifies the form descriptor endpoint returns
// 200 and a payload containing at least one of the known top-level keys.
func TestHandleGetFormDescriptor(t *testing.T) {
	s := newTestServerWithSchemas(t)
	router := newSchemaRouter(s)

	req := httptest.NewRequest("GET", "/api/schemas/kindred-rd/form", nil)
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var form map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &form); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	// Form descriptor should have fields.
	if _, ok := form["fields"]; !ok {
		// Some schemas may use "categories" or "segments" instead.
		if _, ok := form["categories"]; !ok {
			if _, ok := form["segments"]; !ok {
				t.Error("form descriptor missing fields/categories/segments key")
			}
		}
	}
}
|
||||
316
internal/api/settings_handlers.go
Normal file
316
internal/api/settings_handlers.go
Normal file
@@ -0,0 +1,316 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
)
|
||||
|
||||
// HandleGetAllSettings returns the full config grouped by module with secrets redacted.
|
||||
func (s *Server) HandleGetAllSettings(w http.ResponseWriter, r *http.Request) {
|
||||
resp := map[string]any{
|
||||
"core": s.buildCoreSettings(),
|
||||
"schemas": s.buildSchemasSettings(),
|
||||
"storage": s.buildStorageSettings(r.Context()),
|
||||
"database": s.buildDatabaseSettings(r.Context()),
|
||||
"auth": s.buildAuthSettings(),
|
||||
"projects": map[string]any{"enabled": s.modules.IsEnabled("projects")},
|
||||
"audit": map[string]any{"enabled": s.modules.IsEnabled("audit")},
|
||||
"odoo": s.buildOdooSettings(),
|
||||
"freecad": s.buildFreecadSettings(),
|
||||
"jobs": s.buildJobsSettings(),
|
||||
"dag": map[string]any{"enabled": s.modules.IsEnabled("dag")},
|
||||
}
|
||||
writeJSON(w, http.StatusOK, resp)
|
||||
}
|
||||
|
||||
// HandleGetModuleSettings returns settings for a single module.
|
||||
func (s *Server) HandleGetModuleSettings(w http.ResponseWriter, r *http.Request) {
|
||||
module := chi.URLParam(r, "module")
|
||||
|
||||
var settings any
|
||||
switch module {
|
||||
case "core":
|
||||
settings = s.buildCoreSettings()
|
||||
case "schemas":
|
||||
settings = s.buildSchemasSettings()
|
||||
case "storage":
|
||||
settings = s.buildStorageSettings(r.Context())
|
||||
case "database":
|
||||
settings = s.buildDatabaseSettings(r.Context())
|
||||
case "auth":
|
||||
settings = s.buildAuthSettings()
|
||||
case "projects":
|
||||
settings = map[string]any{"enabled": s.modules.IsEnabled("projects")}
|
||||
case "audit":
|
||||
settings = map[string]any{"enabled": s.modules.IsEnabled("audit")}
|
||||
case "odoo":
|
||||
settings = s.buildOdooSettings()
|
||||
case "freecad":
|
||||
settings = s.buildFreecadSettings()
|
||||
case "jobs":
|
||||
settings = s.buildJobsSettings()
|
||||
case "dag":
|
||||
settings = map[string]any{"enabled": s.modules.IsEnabled("dag")}
|
||||
default:
|
||||
writeError(w, http.StatusNotFound, "not_found", "Unknown module: "+module)
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, settings)
|
||||
}
|
||||
|
||||
// HandleUpdateModuleSettings handles module toggle and config overrides.
//
// Body shape: {"enabled": bool, "<key>": <value>, ...}. The "enabled" key
// toggles the module; every other key is persisted as "<module>.<key>".
// Responds with the list of updated keys and whether a restart is required.
func (s *Server) HandleUpdateModuleSettings(w http.ResponseWriter, r *http.Request) {
	module := chi.URLParam(r, "module")

	// Validate module exists before reading the body.
	if s.modules.Get(module) == nil {
		writeError(w, http.StatusNotFound, "not_found", "Unknown module: "+module)
		return
	}

	var body map[string]any
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "invalid_json", err.Error())
		return
	}

	// Attribute the change to the authenticated user, or "system" when the
	// request context carries no user.
	user := auth.UserFromContext(r.Context())
	username := "system"
	if user != nil {
		username = user.Username
	}

	var updated []string
	restartRequired := false

	// Handle module toggle. The in-memory state is flipped before the
	// persist; NOTE(review): a persistence failure after a successful
	// SetEnabled leaves the in-memory toggle applied — confirm intended.
	if enabledVal, ok := body["enabled"]; ok {
		enabled, ok := enabledVal.(bool)
		if !ok {
			writeError(w, http.StatusBadRequest, "invalid_value", "'enabled' must be a boolean")
			return
		}

		if err := s.modules.SetEnabled(module, enabled); err != nil {
			writeError(w, http.StatusBadRequest, "toggle_failed", err.Error())
			return
		}

		if err := s.settings.SetModuleState(r.Context(), module, enabled, username); err != nil {
			s.logger.Error().Err(err).Str("module", module).Msg("failed to persist module state")
			writeError(w, http.StatusInternalServerError, "persist_failed", "Failed to save module state")
			return
		}

		updated = append(updated, module+".enabled")
	}

	// Handle config overrides (future use — persisted but not hot-reloaded).
	// Go map iteration order is random, so the order of `updated` beyond the
	// leading "<module>.enabled" entry is nondeterministic.
	for key, value := range body {
		if key == "enabled" {
			continue
		}

		fullKey := module + "." + key
		if err := s.settings.SetOverride(r.Context(), fullKey, value, username); err != nil {
			s.logger.Error().Err(err).Str("key", fullKey).Msg("failed to persist setting override")
			writeError(w, http.StatusInternalServerError, "persist_failed", "Failed to save setting: "+key)
			return
		}
		updated = append(updated, fullKey)

		// These namespaces require a restart to take effect.
		if strings.HasPrefix(fullKey, "database.") ||
			strings.HasPrefix(fullKey, "storage.") ||
			strings.HasPrefix(fullKey, "server.") ||
			strings.HasPrefix(fullKey, "schemas.") {
			restartRequired = true
		}
	}

	writeJSON(w, http.StatusOK, map[string]any{
		"updated":          updated,
		"restart_required": restartRequired,
	})

	// Publish SSE event (intentionally after the response is written).
	s.broker.Publish("settings.changed", mustMarshal(map[string]any{
		"module":       module,
		"changed_keys": updated,
		"updated_by":   username,
	}))
}
|
||||
|
||||
// HandleTestModuleConnectivity tests external connectivity for a module.
|
||||
func (s *Server) HandleTestModuleConnectivity(w http.ResponseWriter, r *http.Request) {
|
||||
module := chi.URLParam(r, "module")
|
||||
|
||||
start := time.Now()
|
||||
var success bool
|
||||
var message string
|
||||
|
||||
switch module {
|
||||
case "database":
|
||||
if err := s.db.Pool().Ping(r.Context()); err != nil {
|
||||
success = false
|
||||
message = "Database ping failed: " + err.Error()
|
||||
} else {
|
||||
success = true
|
||||
message = "Database connection OK"
|
||||
}
|
||||
case "storage":
|
||||
if s.storage == nil {
|
||||
success = false
|
||||
message = "Storage not configured"
|
||||
} else if err := s.storage.Ping(r.Context()); err != nil {
|
||||
success = false
|
||||
message = "Storage ping failed: " + err.Error()
|
||||
} else {
|
||||
success = true
|
||||
message = "Storage connection OK"
|
||||
}
|
||||
case "auth", "odoo":
|
||||
success = false
|
||||
message = "Connectivity test not implemented for " + module
|
||||
default:
|
||||
writeError(w, http.StatusBadRequest, "not_testable", "No connectivity test available for module: "+module)
|
||||
return
|
||||
}
|
||||
|
||||
latency := time.Since(start).Milliseconds()
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"success": success,
|
||||
"message": message,
|
||||
"latency_ms": latency,
|
||||
})
|
||||
}
|
||||
|
||||
// --- build helpers (read config, redact secrets) ---
|
||||
|
||||
// redact masks a non-empty secret as "****"; an empty value stays empty.
func redact(value string) string {
	if len(value) == 0 {
		return ""
	}
	return "****"
}
|
||||
|
||||
func (s *Server) buildCoreSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": true,
|
||||
"host": s.cfg.Server.Host,
|
||||
"port": s.cfg.Server.Port,
|
||||
"base_url": s.cfg.Server.BaseURL,
|
||||
"readonly": s.cfg.Server.ReadOnly,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) buildSchemasSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": true,
|
||||
"directory": s.cfg.Schemas.Directory,
|
||||
"default": s.cfg.Schemas.Default,
|
||||
"count": len(s.schemas),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) buildStorageSettings(ctx context.Context) map[string]any {
|
||||
result := map[string]any{
|
||||
"enabled": true,
|
||||
"endpoint": s.cfg.Storage.Endpoint,
|
||||
"bucket": s.cfg.Storage.Bucket,
|
||||
"use_ssl": s.cfg.Storage.UseSSL,
|
||||
"region": s.cfg.Storage.Region,
|
||||
}
|
||||
if s.storage != nil {
|
||||
if err := s.storage.Ping(ctx); err != nil {
|
||||
result["status"] = "unavailable"
|
||||
} else {
|
||||
result["status"] = "ok"
|
||||
}
|
||||
} else {
|
||||
result["status"] = "not_configured"
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Server) buildDatabaseSettings(ctx context.Context) map[string]any {
|
||||
result := map[string]any{
|
||||
"enabled": true,
|
||||
"host": s.cfg.Database.Host,
|
||||
"port": s.cfg.Database.Port,
|
||||
"name": s.cfg.Database.Name,
|
||||
"user": s.cfg.Database.User,
|
||||
"password": redact(s.cfg.Database.Password),
|
||||
"sslmode": s.cfg.Database.SSLMode,
|
||||
"max_connections": s.cfg.Database.MaxConnections,
|
||||
}
|
||||
if err := s.db.Pool().Ping(ctx); err != nil {
|
||||
result["status"] = "unavailable"
|
||||
} else {
|
||||
result["status"] = "ok"
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Server) buildAuthSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": s.modules.IsEnabled("auth"),
|
||||
"session_secret": redact(s.cfg.Auth.SessionSecret),
|
||||
"local": map[string]any{
|
||||
"enabled": s.cfg.Auth.Local.Enabled,
|
||||
"default_admin_username": s.cfg.Auth.Local.DefaultAdminUsername,
|
||||
"default_admin_password": redact(s.cfg.Auth.Local.DefaultAdminPassword),
|
||||
},
|
||||
"ldap": map[string]any{
|
||||
"enabled": s.cfg.Auth.LDAP.Enabled,
|
||||
"url": s.cfg.Auth.LDAP.URL,
|
||||
"base_dn": s.cfg.Auth.LDAP.BaseDN,
|
||||
"bind_dn": s.cfg.Auth.LDAP.BindDN,
|
||||
"bind_password": redact(s.cfg.Auth.LDAP.BindPassword),
|
||||
},
|
||||
"oidc": map[string]any{
|
||||
"enabled": s.cfg.Auth.OIDC.Enabled,
|
||||
"issuer_url": s.cfg.Auth.OIDC.IssuerURL,
|
||||
"client_id": s.cfg.Auth.OIDC.ClientID,
|
||||
"client_secret": redact(s.cfg.Auth.OIDC.ClientSecret),
|
||||
"redirect_url": s.cfg.Auth.OIDC.RedirectURL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) buildOdooSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": s.modules.IsEnabled("odoo"),
|
||||
"url": s.cfg.Odoo.URL,
|
||||
"database": s.cfg.Odoo.Database,
|
||||
"username": s.cfg.Odoo.Username,
|
||||
"api_key": redact(s.cfg.Odoo.APIKey),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) buildFreecadSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": s.modules.IsEnabled("freecad"),
|
||||
"uri_scheme": s.cfg.FreeCAD.URIScheme,
|
||||
"executable": s.cfg.FreeCAD.Executable,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) buildJobsSettings() map[string]any {
|
||||
return map[string]any{
|
||||
"enabled": s.modules.IsEnabled("jobs"),
|
||||
"directory": s.cfg.Jobs.Directory,
|
||||
"runner_timeout": s.cfg.Jobs.RunnerTimeout,
|
||||
"job_timeout_check": s.cfg.Jobs.JobTimeoutCheck,
|
||||
"default_priority": s.cfg.Jobs.DefaultPriority,
|
||||
"definitions_count": len(s.jobDefs),
|
||||
}
|
||||
}
|
||||
285
internal/api/settings_handlers_test.go
Normal file
285
internal/api/settings_handlers_test.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/kindredsystems/silo/internal/auth"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
"github.com/kindredsystems/silo/internal/db"
|
||||
"github.com/kindredsystems/silo/internal/modules"
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
"github.com/kindredsystems/silo/internal/testutil"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// newSettingsTestServer builds a Server backed by the shared test
// database but with nil storage/auth/session/OIDC backends. The config
// deliberately fills every secret field so the redaction tests can
// assert they come back as "****".
func newSettingsTestServer(t *testing.T) *Server {
	t.Helper()
	pool := testutil.MustConnectTestPool(t)
	database := db.NewFromPool(pool)
	broker := NewBroker(zerolog.Nop())
	state := NewServerState(zerolog.Nop(), nil, broker)
	cfg := &config.Config{
		Server: config.ServerConfig{Host: "0.0.0.0", Port: 8080},
		Database: config.DatabaseConfig{
			Host: "localhost", Port: 5432, Name: "silo_test",
			User: "silo", Password: "secret", SSLMode: "disable",
			MaxConnections: 10,
		},
		Storage: config.StorageConfig{
			Endpoint: "minio:9000", Bucket: "silo", Region: "us-east-1",
			AccessKey: "minioadmin", SecretKey: "miniosecret",
		},
		Schemas: config.SchemasConfig{Directory: "/etc/silo/schemas", Default: "kindred-rd"},
		Auth: config.AuthConfig{
			SessionSecret: "supersecret",
			Local:         config.LocalAuth{Enabled: true, DefaultAdminUsername: "admin", DefaultAdminPassword: "changeme"},
			LDAP:          config.LDAPAuth{Enabled: false, BindPassword: "ldapsecret"},
			OIDC:          config.OIDCAuth{Enabled: false, ClientSecret: "oidcsecret"},
		},
		FreeCAD: config.FreeCADConfig{URIScheme: "silo"},
		Odoo:    config.OdooConfig{URL: "https://odoo.example.com", APIKey: "odoo-api-key"},
		Jobs:    config.JobsConfig{Directory: "/etc/silo/jobdefs", RunnerTimeout: 90, JobTimeoutCheck: 30, DefaultPriority: 100},
	}
	// NewServer takes positional dependencies; the inline comments name
	// each nil slot so the call stays readable if the signature grows.
	return NewServer(
		zerolog.Nop(),
		database,
		map[string]*schema.Schema{"test": {Name: "test"}},
		cfg.Schemas.Directory,
		nil, // storage
		nil, // authService
		nil, // sessionManager
		nil, // oidcBackend
		nil, // authConfig
		broker,
		state,
		nil, // jobDefs
		"",  // jobDefsDir
		modules.NewRegistry(), // modules
		cfg,
	)
}
|
||||
|
||||
func newSettingsRouter(s *Server) http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Route("/api/admin/settings", func(r chi.Router) {
|
||||
r.Get("/", s.HandleGetAllSettings)
|
||||
r.Get("/{module}", s.HandleGetModuleSettings)
|
||||
r.Put("/{module}", s.HandleUpdateModuleSettings)
|
||||
r.Post("/{module}/test", s.HandleTestModuleConnectivity)
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
func adminSettingsRequest(r *http.Request) *http.Request {
|
||||
u := &auth.User{
|
||||
ID: "admin-id",
|
||||
Username: "testadmin",
|
||||
Role: auth.RoleAdmin,
|
||||
}
|
||||
return r.WithContext(auth.ContextWithUser(r.Context(), u))
|
||||
}
|
||||
|
||||
func viewerSettingsRequest(r *http.Request) *http.Request {
|
||||
u := &auth.User{
|
||||
ID: "viewer-id",
|
||||
Username: "testviewer",
|
||||
Role: auth.RoleViewer,
|
||||
}
|
||||
return r.WithContext(auth.ContextWithUser(r.Context(), u))
|
||||
}
|
||||
|
||||
// TestGetAllSettings verifies the aggregate settings endpoint: every
// known module key appears in the response, and every secret-bearing
// field is redacted to "****" rather than echoed in clear text.
func TestGetAllSettings(t *testing.T) {
	s := newSettingsTestServer(t)
	router := newSettingsRouter(s)

	req := adminSettingsRequest(httptest.NewRequest("GET", "/api/admin/settings", nil))
	w := httptest.NewRecorder()
	router.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
	}

	var resp map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
		t.Fatalf("decoding: %v", err)
	}

	// Verify all module keys present
	expectedModules := []string{"core", "schemas", "storage", "database", "auth", "projects", "audit", "odoo", "freecad", "jobs", "dag"}
	for _, mod := range expectedModules {
		if _, ok := resp[mod]; !ok {
			t.Errorf("missing module key: %s", mod)
		}
	}

	// Verify secrets are redacted
	// (the fixture config sets real values for each of these fields,
	// so an unredacted response would leak them here).
	dbSettings, _ := resp["database"].(map[string]any)
	if dbSettings["password"] != "****" {
		t.Errorf("database password not redacted: got %v", dbSettings["password"])
	}

	authSettings, _ := resp["auth"].(map[string]any)
	if authSettings["session_secret"] != "****" {
		t.Errorf("session_secret not redacted: got %v", authSettings["session_secret"])
	}

	ldap, _ := authSettings["ldap"].(map[string]any)
	if ldap["bind_password"] != "****" {
		t.Errorf("ldap bind_password not redacted: got %v", ldap["bind_password"])
	}

	oidc, _ := authSettings["oidc"].(map[string]any)
	if oidc["client_secret"] != "****" {
		t.Errorf("oidc client_secret not redacted: got %v", oidc["client_secret"])
	}

	odoo, _ := resp["odoo"].(map[string]any)
	if odoo["api_key"] != "****" {
		t.Errorf("odoo api_key not redacted: got %v", odoo["api_key"])
	}
}
|
||||
|
||||
func TestGetModuleSettings(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
req := adminSettingsRequest(httptest.NewRequest("GET", "/api/admin/settings/jobs", nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding: %v", err)
|
||||
}
|
||||
|
||||
if resp["directory"] != "/etc/silo/jobdefs" {
|
||||
t.Errorf("jobs directory: got %v, want /etc/silo/jobdefs", resp["directory"])
|
||||
}
|
||||
if resp["runner_timeout"] != float64(90) {
|
||||
t.Errorf("runner_timeout: got %v, want 90", resp["runner_timeout"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetModuleSettings_Unknown(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
req := adminSettingsRequest(httptest.NewRequest("GET", "/api/admin/settings/nonexistent", nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("status: got %d, want %d", w.Code, http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToggleModule(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
// Projects is enabled by default; disable it
|
||||
body := `{"enabled": false}`
|
||||
req := adminSettingsRequest(httptest.NewRequest("PUT", "/api/admin/settings/projects", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding: %v", err)
|
||||
}
|
||||
|
||||
updated, _ := resp["updated"].([]any)
|
||||
if len(updated) != 1 || updated[0] != "projects.enabled" {
|
||||
t.Errorf("updated: got %v, want [projects.enabled]", updated)
|
||||
}
|
||||
|
||||
// Verify registry state
|
||||
if s.modules.IsEnabled("projects") {
|
||||
t.Error("projects should be disabled after toggle")
|
||||
}
|
||||
}
|
||||
|
||||
func TestToggleModule_DependencyError(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
// DAG depends on Jobs. Jobs is disabled by default.
|
||||
// Enabling DAG without Jobs should fail.
|
||||
body := `{"enabled": true}`
|
||||
req := adminSettingsRequest(httptest.NewRequest("PUT", "/api/admin/settings/dag", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusBadRequest, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestToggleRequiredModule(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
body := `{"enabled": false}`
|
||||
req := adminSettingsRequest(httptest.NewRequest("PUT", "/api/admin/settings/core", strings.NewReader(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusBadRequest, w.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestConnectivity_Database(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
req := adminSettingsRequest(httptest.NewRequest("POST", "/api/admin/settings/database/test", nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("status: got %d, want %d; body: %s", w.Code, http.StatusOK, w.Body.String())
|
||||
}
|
||||
|
||||
var resp map[string]any
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("decoding: %v", err)
|
||||
}
|
||||
|
||||
if resp["success"] != true {
|
||||
t.Errorf("expected success=true, got %v; message: %v", resp["success"], resp["message"])
|
||||
}
|
||||
if resp["latency_ms"] == nil {
|
||||
t.Error("expected latency_ms in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestConnectivity_NotTestable(t *testing.T) {
|
||||
s := newSettingsTestServer(t)
|
||||
router := newSettingsRouter(s)
|
||||
|
||||
req := adminSettingsRequest(httptest.NewRequest("POST", "/api/admin/settings/core/test", nil))
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("status: got %d, want %d; body: %s", w.Code, http.StatusBadRequest, w.Body.String())
|
||||
}
|
||||
}
|
||||
24
internal/auth/runner.go
Normal file
24
internal/auth/runner.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package auth
|
||||
|
||||
import "context"
|
||||
|
||||
// runnerContextKey is the context key under which a *RunnerIdentity is
// stored. iota + 1 yields 1, keeping it distinct from the package's
// zero-valued contextKey (presumably the user key — confirm in context.go).
const runnerContextKey contextKey = iota + 1

// RunnerIdentity represents an authenticated runner in the request context.
type RunnerIdentity struct {
	ID   string   // runner's unique identifier
	Name string   // runner's display name
	Tags []string // tags attached to the runner; matching semantics live elsewhere
}
|
||||
|
||||
// RunnerFromContext extracts the authenticated runner from the request context.
|
||||
// Returns nil if no runner is present.
|
||||
func RunnerFromContext(ctx context.Context) *RunnerIdentity {
|
||||
r, _ := ctx.Value(runnerContextKey).(*RunnerIdentity)
|
||||
return r
|
||||
}
|
||||
|
||||
// ContextWithRunner returns a new context carrying the given runner identity.
|
||||
func ContextWithRunner(ctx context.Context, r *RunnerIdentity) context.Context {
|
||||
return context.WithValue(ctx, runnerContextKey, r)
|
||||
}
|
||||
@@ -17,6 +17,26 @@ type Config struct {
|
||||
FreeCAD FreeCADConfig `yaml:"freecad"`
|
||||
Odoo OdooConfig `yaml:"odoo"`
|
||||
Auth AuthConfig `yaml:"auth"`
|
||||
Jobs JobsConfig `yaml:"jobs"`
|
||||
Modules ModulesConfig `yaml:"modules"`
|
||||
}
|
||||
|
||||
// ModulesConfig holds explicit enable/disable toggles for optional modules.
// A nil pointer means "use the module's default state"; a non-nil toggle
// overrides it (see ModuleToggle).
type ModulesConfig struct {
	Auth     *ModuleToggle `yaml:"auth"`
	Projects *ModuleToggle `yaml:"projects"`
	Audit    *ModuleToggle `yaml:"audit"`
	Odoo     *ModuleToggle `yaml:"odoo"`
	FreeCAD  *ModuleToggle `yaml:"freecad"`
	Jobs     *ModuleToggle `yaml:"jobs"`
	DAG      *ModuleToggle `yaml:"dag"`
}
|
||||
|
||||
// ModuleToggle holds an optional enabled flag. The pointer allows
// distinguishing "not set" (nil) from "explicitly false", which matters
// for YAML where an absent key must not override a module's default.
type ModuleToggle struct {
	Enabled *bool `yaml:"enabled"`
}
|
||||
|
||||
// AuthConfig holds authentication and authorization settings.
|
||||
@@ -111,6 +131,14 @@ type FreeCADConfig struct {
|
||||
Executable string `yaml:"executable"`
|
||||
}
|
||||
|
||||
// JobsConfig holds worker/runner system settings. Zero values are
// replaced by the listed defaults in Load.
type JobsConfig struct {
	Directory       string `yaml:"directory"`         // default /etc/silo/jobdefs
	RunnerTimeout   int    `yaml:"runner_timeout"`    // seconds, default 90
	JobTimeoutCheck int    `yaml:"job_timeout_check"` // seconds, default 30
	DefaultPriority int    `yaml:"default_priority"`  // default 100
}
|
||||
|
||||
// OdooConfig holds Odoo ERP integration settings.
|
||||
type OdooConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
@@ -157,6 +185,18 @@ func Load(path string) (*Config, error) {
|
||||
if cfg.FreeCAD.URIScheme == "" {
|
||||
cfg.FreeCAD.URIScheme = "silo"
|
||||
}
|
||||
if cfg.Jobs.Directory == "" {
|
||||
cfg.Jobs.Directory = "/etc/silo/jobdefs"
|
||||
}
|
||||
if cfg.Jobs.RunnerTimeout == 0 {
|
||||
cfg.Jobs.RunnerTimeout = 90
|
||||
}
|
||||
if cfg.Jobs.JobTimeoutCheck == 0 {
|
||||
cfg.Jobs.JobTimeoutCheck = 30
|
||||
}
|
||||
if cfg.Jobs.DefaultPriority == 0 {
|
||||
cfg.Jobs.DefaultPriority = 100
|
||||
}
|
||||
|
||||
// Override with environment variables
|
||||
if v := os.Getenv("SILO_DB_HOST"); v != "" {
|
||||
|
||||
520
internal/db/dag.go
Normal file
520
internal/db/dag.go
Normal file
@@ -0,0 +1,520 @@
|
||||
package db
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5"
)
|
||||
|
||||
// DAGNode represents a feature-level node in the dependency graph.
// A node is uniquely identified by (ItemID, RevisionNumber, NodeKey);
// ID is the database-generated identifier.
type DAGNode struct {
	ID             string
	ItemID         string
	RevisionNumber int
	NodeKey        string
	NodeType       string
	PropertiesHash *string // content hash; nil when never computed
	// ValidationState is one of 'clean', 'dirty', 'validating', 'failed'
	// (see the Mark* methods below).
	ValidationState string
	ValidationMsg   *string // error detail when state is 'failed'
	Metadata        map[string]any
	CreatedAt       time.Time
	UpdatedAt       time.Time
}

// DAGEdge represents a dependency between two nodes.
type DAGEdge struct {
	ID           string
	SourceNodeID string
	TargetNodeID string
	EdgeType     string // defaults to "depends_on" when empty
	Metadata     map[string]any
}

// DAGCrossEdge represents a dependency between nodes in different items.
type DAGCrossEdge struct {
	ID             string
	SourceNodeID   string
	TargetNodeID   string
	RelationshipID *string // optional link to the owning relationship record
	EdgeType       string
	Metadata       map[string]any
}

// DAGRepository provides dependency graph database operations.
type DAGRepository struct {
	db *DB
}

// NewDAGRepository creates a new DAG repository.
func NewDAGRepository(db *DB) *DAGRepository {
	return &DAGRepository{db: db}
}
|
||||
|
||||
// GetNodes returns all DAG nodes for an item at a specific revision.
|
||||
func (r *DAGRepository) GetNodes(ctx context.Context, itemID string, revisionNumber int) ([]*DAGNode, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg,
|
||||
metadata, created_at, updated_at
|
||||
FROM dag_nodes
|
||||
WHERE item_id = $1 AND revision_number = $2
|
||||
ORDER BY node_key
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying DAG nodes: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanDAGNodes(rows)
|
||||
}
|
||||
|
||||
// GetNodeByKey returns a single DAG node by item, revision, and key.
|
||||
func (r *DAGRepository) GetNodeByKey(ctx context.Context, itemID string, revisionNumber int, nodeKey string) (*DAGNode, error) {
|
||||
n := &DAGNode{}
|
||||
var metadataJSON []byte
|
||||
err := r.db.pool.QueryRow(ctx, `
|
||||
SELECT id, item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg,
|
||||
metadata, created_at, updated_at
|
||||
FROM dag_nodes
|
||||
WHERE item_id = $1 AND revision_number = $2 AND node_key = $3
|
||||
`, itemID, revisionNumber, nodeKey).Scan(
|
||||
&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
|
||||
&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
|
||||
&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
|
||||
)
|
||||
if err == pgx.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying DAG node: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// GetNodeByID returns a single DAG node by its ID.
|
||||
func (r *DAGRepository) GetNodeByID(ctx context.Context, nodeID string) (*DAGNode, error) {
|
||||
n := &DAGNode{}
|
||||
var metadataJSON []byte
|
||||
err := r.db.pool.QueryRow(ctx, `
|
||||
SELECT id, item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg,
|
||||
metadata, created_at, updated_at
|
||||
FROM dag_nodes
|
||||
WHERE id = $1
|
||||
`, nodeID).Scan(
|
||||
&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
|
||||
&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
|
||||
&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
|
||||
)
|
||||
if err == pgx.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying DAG node by ID: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// UpsertNode inserts or updates a single DAG node.
|
||||
func (r *DAGRepository) UpsertNode(ctx context.Context, n *DAGNode) error {
|
||||
metadataJSON, err := json.Marshal(n.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling metadata: %w", err)
|
||||
}
|
||||
|
||||
err = r.db.pool.QueryRow(ctx, `
|
||||
INSERT INTO dag_nodes (item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg, metadata)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
ON CONFLICT (item_id, revision_number, node_key)
|
||||
DO UPDATE SET
|
||||
node_type = EXCLUDED.node_type,
|
||||
properties_hash = EXCLUDED.properties_hash,
|
||||
validation_state = EXCLUDED.validation_state,
|
||||
validation_msg = EXCLUDED.validation_msg,
|
||||
metadata = EXCLUDED.metadata,
|
||||
updated_at = now()
|
||||
RETURNING id, created_at, updated_at
|
||||
`, n.ItemID, n.RevisionNumber, n.NodeKey, n.NodeType,
|
||||
n.PropertiesHash, n.ValidationState, n.ValidationMsg, metadataJSON,
|
||||
).Scan(&n.ID, &n.CreatedAt, &n.UpdatedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("upserting DAG node: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetEdges returns all edges for nodes belonging to an item at a specific revision.
|
||||
func (r *DAGRepository) GetEdges(ctx context.Context, itemID string, revisionNumber int) ([]*DAGEdge, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT e.id, e.source_node_id, e.target_node_id, e.edge_type, e.metadata
|
||||
FROM dag_edges e
|
||||
JOIN dag_nodes src ON src.id = e.source_node_id
|
||||
WHERE src.item_id = $1 AND src.revision_number = $2
|
||||
ORDER BY e.source_node_id, e.target_node_id
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying DAG edges: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var edges []*DAGEdge
|
||||
for rows.Next() {
|
||||
e := &DAGEdge{}
|
||||
var metadataJSON []byte
|
||||
if err := rows.Scan(&e.ID, &e.SourceNodeID, &e.TargetNodeID, &e.EdgeType, &metadataJSON); err != nil {
|
||||
return nil, fmt.Errorf("scanning DAG edge: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &e.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling edge metadata: %w", err)
|
||||
}
|
||||
}
|
||||
edges = append(edges, e)
|
||||
}
|
||||
return edges, rows.Err()
|
||||
}
|
||||
|
||||
// CreateEdge inserts a new edge between two nodes.
|
||||
func (r *DAGRepository) CreateEdge(ctx context.Context, e *DAGEdge) error {
|
||||
if e.EdgeType == "" {
|
||||
e.EdgeType = "depends_on"
|
||||
}
|
||||
metadataJSON, err := json.Marshal(e.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling edge metadata: %w", err)
|
||||
}
|
||||
|
||||
err = r.db.pool.QueryRow(ctx, `
|
||||
INSERT INTO dag_edges (source_node_id, target_node_id, edge_type, metadata)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
ON CONFLICT (source_node_id, target_node_id, edge_type) DO NOTHING
|
||||
RETURNING id
|
||||
`, e.SourceNodeID, e.TargetNodeID, e.EdgeType, metadataJSON).Scan(&e.ID)
|
||||
if err == pgx.ErrNoRows {
|
||||
// Edge already exists, not an error
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating DAG edge: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteEdgesForItem removes all edges for nodes belonging to an item/revision.
|
||||
func (r *DAGRepository) DeleteEdgesForItem(ctx context.Context, itemID string, revisionNumber int) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
DELETE FROM dag_edges
|
||||
WHERE source_node_id IN (
|
||||
SELECT id FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
)
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting edges for item: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetForwardCone returns all downstream dependent nodes reachable from the
|
||||
// given node via edges. This is the key query for interference detection.
|
||||
func (r *DAGRepository) GetForwardCone(ctx context.Context, nodeID string) ([]*DAGNode, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT target_node_id AS node_id
|
||||
FROM dag_edges
|
||||
WHERE source_node_id = $1
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
SELECT n.id, n.item_id, n.revision_number, n.node_key, n.node_type,
|
||||
n.properties_hash, n.validation_state, n.validation_msg,
|
||||
n.metadata, n.created_at, n.updated_at
|
||||
FROM dag_nodes n
|
||||
JOIN forward_cone fc ON n.id = fc.node_id
|
||||
ORDER BY n.node_key
|
||||
`, nodeID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying forward cone: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanDAGNodes(rows)
|
||||
}
|
||||
|
||||
// GetBackwardCone returns all upstream dependency nodes that the given
|
||||
// node depends on.
|
||||
func (r *DAGRepository) GetBackwardCone(ctx context.Context, nodeID string) ([]*DAGNode, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
WITH RECURSIVE backward_cone AS (
|
||||
SELECT source_node_id AS node_id
|
||||
FROM dag_edges
|
||||
WHERE target_node_id = $1
|
||||
UNION
|
||||
SELECT e.source_node_id
|
||||
FROM dag_edges e
|
||||
JOIN backward_cone bc ON bc.node_id = e.target_node_id
|
||||
)
|
||||
SELECT n.id, n.item_id, n.revision_number, n.node_key, n.node_type,
|
||||
n.properties_hash, n.validation_state, n.validation_msg,
|
||||
n.metadata, n.created_at, n.updated_at
|
||||
FROM dag_nodes n
|
||||
JOIN backward_cone bc ON n.id = bc.node_id
|
||||
ORDER BY n.node_key
|
||||
`, nodeID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying backward cone: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanDAGNodes(rows)
|
||||
}
|
||||
|
||||
// GetDirtySubgraph returns all non-clean nodes for an item.
|
||||
func (r *DAGRepository) GetDirtySubgraph(ctx context.Context, itemID string) ([]*DAGNode, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg,
|
||||
metadata, created_at, updated_at
|
||||
FROM dag_nodes
|
||||
WHERE item_id = $1 AND validation_state != 'clean'
|
||||
ORDER BY node_key
|
||||
`, itemID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying dirty subgraph: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanDAGNodes(rows)
|
||||
}
|
||||
|
||||
// MarkDirty marks a node and all its downstream dependents as dirty.
|
||||
func (r *DAGRepository) MarkDirty(ctx context.Context, nodeID string) (int64, error) {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
WITH RECURSIVE forward_cone AS (
|
||||
SELECT $1::uuid AS node_id
|
||||
UNION
|
||||
SELECT e.target_node_id
|
||||
FROM dag_edges e
|
||||
JOIN forward_cone fc ON fc.node_id = e.source_node_id
|
||||
)
|
||||
UPDATE dag_nodes SET validation_state = 'dirty', updated_at = now()
|
||||
WHERE id IN (SELECT node_id FROM forward_cone)
|
||||
AND validation_state = 'clean'
|
||||
`, nodeID)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("marking dirty: %w", err)
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
// MarkValidating sets a node's state to 'validating'.
|
||||
func (r *DAGRepository) MarkValidating(ctx context.Context, nodeID string) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE dag_nodes SET validation_state = 'validating', updated_at = now()
|
||||
WHERE id = $1
|
||||
`, nodeID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marking validating: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkClean sets a node's state to 'clean' and updates its properties hash.
|
||||
func (r *DAGRepository) MarkClean(ctx context.Context, nodeID string, propertiesHash string) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE dag_nodes
|
||||
SET validation_state = 'clean',
|
||||
properties_hash = $2,
|
||||
validation_msg = NULL,
|
||||
updated_at = now()
|
||||
WHERE id = $1
|
||||
`, nodeID, propertiesHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marking clean: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkFailed sets a node's state to 'failed' with an error message.
|
||||
func (r *DAGRepository) MarkFailed(ctx context.Context, nodeID string, message string) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE dag_nodes
|
||||
SET validation_state = 'failed',
|
||||
validation_msg = $2,
|
||||
updated_at = now()
|
||||
WHERE id = $1
|
||||
`, nodeID, message)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marking failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasCycle checks whether adding an edge from sourceID to targetID would
|
||||
// create a cycle. It walks upward from sourceID to see if targetID is
|
||||
// already an ancestor.
|
||||
func (r *DAGRepository) HasCycle(ctx context.Context, sourceID, targetID string) (bool, error) {
|
||||
if sourceID == targetID {
|
||||
return true, nil
|
||||
}
|
||||
var hasCycle bool
|
||||
err := r.db.pool.QueryRow(ctx, `
|
||||
WITH RECURSIVE ancestors AS (
|
||||
SELECT source_node_id AS node_id
|
||||
FROM dag_edges
|
||||
WHERE target_node_id = $1
|
||||
UNION
|
||||
SELECT e.source_node_id
|
||||
FROM dag_edges e
|
||||
JOIN ancestors a ON a.node_id = e.target_node_id
|
||||
)
|
||||
SELECT EXISTS (
|
||||
SELECT 1 FROM ancestors WHERE node_id = $2
|
||||
)
|
||||
`, sourceID, targetID).Scan(&hasCycle)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("checking for cycle: %w", err)
|
||||
}
|
||||
return hasCycle, nil
|
||||
}
|
||||
|
||||
// SyncFeatureTree replaces the entire feature DAG for an item/revision
|
||||
// within a single transaction. It upserts nodes, replaces edges, and
|
||||
// marks changed nodes dirty.
|
||||
func (r *DAGRepository) SyncFeatureTree(ctx context.Context, itemID string, revisionNumber int, nodes []DAGNode, edges []DAGEdge) error {
|
||||
tx, err := r.db.pool.Begin(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("beginning transaction: %w", err)
|
||||
}
|
||||
defer tx.Rollback(ctx)
|
||||
|
||||
// Upsert all nodes
|
||||
for i := range nodes {
|
||||
n := &nodes[i]
|
||||
n.ItemID = itemID
|
||||
n.RevisionNumber = revisionNumber
|
||||
if n.ValidationState == "" {
|
||||
n.ValidationState = "clean"
|
||||
}
|
||||
|
||||
metadataJSON, err := json.Marshal(n.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling node metadata: %w", err)
|
||||
}
|
||||
|
||||
err = tx.QueryRow(ctx, `
|
||||
INSERT INTO dag_nodes (item_id, revision_number, node_key, node_type,
|
||||
properties_hash, validation_state, validation_msg, metadata)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
ON CONFLICT (item_id, revision_number, node_key)
|
||||
DO UPDATE SET
|
||||
node_type = EXCLUDED.node_type,
|
||||
properties_hash = EXCLUDED.properties_hash,
|
||||
metadata = EXCLUDED.metadata,
|
||||
updated_at = now()
|
||||
RETURNING id, created_at, updated_at
|
||||
`, n.ItemID, n.RevisionNumber, n.NodeKey, n.NodeType,
|
||||
n.PropertiesHash, n.ValidationState, n.ValidationMsg, metadataJSON,
|
||||
).Scan(&n.ID, &n.CreatedAt, &n.UpdatedAt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("upserting node %s: %w", n.NodeKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build key→ID map for edge resolution
|
||||
keyToID := make(map[string]string, len(nodes))
|
||||
for _, n := range nodes {
|
||||
keyToID[n.NodeKey] = n.ID
|
||||
}
|
||||
|
||||
// Delete existing edges for this item/revision
|
||||
_, err = tx.Exec(ctx, `
|
||||
DELETE FROM dag_edges
|
||||
WHERE source_node_id IN (
|
||||
SELECT id FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
)
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting old edges: %w", err)
|
||||
}
|
||||
|
||||
// Insert new edges
|
||||
for i := range edges {
|
||||
e := &edges[i]
|
||||
if e.EdgeType == "" {
|
||||
e.EdgeType = "depends_on"
|
||||
}
|
||||
|
||||
// Resolve source/target from node keys if IDs are not set
|
||||
sourceID := e.SourceNodeID
|
||||
targetID := e.TargetNodeID
|
||||
if sourceID == "" {
|
||||
return fmt.Errorf("edge %d: source_node_id is required", i)
|
||||
}
|
||||
if targetID == "" {
|
||||
return fmt.Errorf("edge %d: target_node_id is required", i)
|
||||
}
|
||||
|
||||
metadataJSON, err := json.Marshal(e.Metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling edge metadata: %w", err)
|
||||
}
|
||||
|
||||
err = tx.QueryRow(ctx, `
|
||||
INSERT INTO dag_edges (source_node_id, target_node_id, edge_type, metadata)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
RETURNING id
|
||||
`, sourceID, targetID, e.EdgeType, metadataJSON).Scan(&e.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating edge: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit(ctx)
|
||||
}
|
||||
|
||||
// DeleteNodesForItem removes all DAG nodes (and cascades to edges) for an item/revision.
|
||||
func (r *DAGRepository) DeleteNodesForItem(ctx context.Context, itemID string, revisionNumber int) error {
|
||||
_, err := r.db.pool.Exec(ctx, `
|
||||
DELETE FROM dag_nodes WHERE item_id = $1 AND revision_number = $2
|
||||
`, itemID, revisionNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting nodes for item: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func scanDAGNodes(rows pgx.Rows) ([]*DAGNode, error) {
|
||||
var nodes []*DAGNode
|
||||
for rows.Next() {
|
||||
n := &DAGNode{}
|
||||
var metadataJSON []byte
|
||||
err := rows.Scan(
|
||||
&n.ID, &n.ItemID, &n.RevisionNumber, &n.NodeKey, &n.NodeType,
|
||||
&n.PropertiesHash, &n.ValidationState, &n.ValidationMsg,
|
||||
&metadataJSON, &n.CreatedAt, &n.UpdatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning DAG node: %w", err)
|
||||
}
|
||||
if metadataJSON != nil {
|
||||
if err := json.Unmarshal(metadataJSON, &n.Metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling node metadata: %w", err)
|
||||
}
|
||||
}
|
||||
nodes = append(nodes, n)
|
||||
}
|
||||
return nodes, rows.Err()
|
||||
}
|
||||
121
internal/db/item_files_test.go
Normal file
121
internal/db/item_files_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestItemFileCreate(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
itemRepo := NewItemRepository(database)
|
||||
fileRepo := NewItemFileRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "FILE-001", ItemType: "part", Description: "file test"}
|
||||
if err := itemRepo.Create(ctx, item, nil); err != nil {
|
||||
t.Fatalf("Create item: %v", err)
|
||||
}
|
||||
|
||||
f := &ItemFile{
|
||||
ItemID: item.ID,
|
||||
Filename: "drawing.pdf",
|
||||
ContentType: "application/pdf",
|
||||
Size: 12345,
|
||||
ObjectKey: "items/FILE-001/files/abc/drawing.pdf",
|
||||
}
|
||||
if err := fileRepo.Create(ctx, f); err != nil {
|
||||
t.Fatalf("Create file: %v", err)
|
||||
}
|
||||
if f.ID == "" {
|
||||
t.Error("expected file ID to be set")
|
||||
}
|
||||
if f.CreatedAt.IsZero() {
|
||||
t.Error("expected created_at to be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemFileListByItem(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
itemRepo := NewItemRepository(database)
|
||||
fileRepo := NewItemFileRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "FLIST-001", ItemType: "part", Description: "file list test"}
|
||||
itemRepo.Create(ctx, item, nil)
|
||||
|
||||
for i, name := range []string{"a.pdf", "b.step"} {
|
||||
fileRepo.Create(ctx, &ItemFile{
|
||||
ItemID: item.ID,
|
||||
Filename: name,
|
||||
ContentType: "application/octet-stream",
|
||||
Size: int64(i * 1000),
|
||||
ObjectKey: "items/FLIST-001/files/" + name,
|
||||
})
|
||||
}
|
||||
|
||||
files, err := fileRepo.ListByItem(ctx, item.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("ListByItem: %v", err)
|
||||
}
|
||||
if len(files) != 2 {
|
||||
t.Errorf("expected 2 files, got %d", len(files))
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemFileGet(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
itemRepo := NewItemRepository(database)
|
||||
fileRepo := NewItemFileRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "FGET-001", ItemType: "part", Description: "file get test"}
|
||||
itemRepo.Create(ctx, item, nil)
|
||||
|
||||
f := &ItemFile{
|
||||
ItemID: item.ID,
|
||||
Filename: "model.FCStd",
|
||||
ContentType: "application/x-freecad",
|
||||
Size: 99999,
|
||||
ObjectKey: "items/FGET-001/files/xyz/model.FCStd",
|
||||
}
|
||||
fileRepo.Create(ctx, f)
|
||||
|
||||
got, err := fileRepo.Get(ctx, f.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Get: %v", err)
|
||||
}
|
||||
if got.Filename != "model.FCStd" {
|
||||
t.Errorf("filename: got %q, want %q", got.Filename, "model.FCStd")
|
||||
}
|
||||
if got.Size != 99999 {
|
||||
t.Errorf("size: got %d, want %d", got.Size, 99999)
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemFileDelete(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
itemRepo := NewItemRepository(database)
|
||||
fileRepo := NewItemFileRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "FDEL-001", ItemType: "part", Description: "file delete test"}
|
||||
itemRepo.Create(ctx, item, nil)
|
||||
|
||||
f := &ItemFile{
|
||||
ItemID: item.ID,
|
||||
Filename: "temp.bin",
|
||||
ContentType: "application/octet-stream",
|
||||
Size: 100,
|
||||
ObjectKey: "items/FDEL-001/files/tmp/temp.bin",
|
||||
}
|
||||
fileRepo.Create(ctx, f)
|
||||
|
||||
if err := fileRepo.Delete(ctx, f.ID); err != nil {
|
||||
t.Fatalf("Delete: %v", err)
|
||||
}
|
||||
|
||||
_, err := fileRepo.Get(ctx, f.ID)
|
||||
if err == nil {
|
||||
t.Error("expected error after delete, got nil")
|
||||
}
|
||||
}
|
||||
281
internal/db/items_edge_test.go
Normal file
281
internal/db/items_edge_test.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestItemCreateDuplicatePartNumber(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "DUP-001", ItemType: "part", Description: "first"}
|
||||
if err := repo.Create(ctx, item, nil); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
dup := &Item{PartNumber: "DUP-001", ItemType: "part", Description: "duplicate"}
|
||||
err := repo.Create(ctx, dup, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for duplicate part number, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "23505") && !strings.Contains(err.Error(), "duplicate") {
|
||||
t.Errorf("expected duplicate key error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemDelete(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "HDEL-001", ItemType: "part", Description: "hard delete"}
|
||||
if err := repo.Create(ctx, item, nil); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
if err := repo.Delete(ctx, item.ID); err != nil {
|
||||
t.Fatalf("Delete: %v", err)
|
||||
}
|
||||
|
||||
got, err := repo.GetByID(ctx, item.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetByID after delete: %v", err)
|
||||
}
|
||||
if got != nil {
|
||||
t.Error("expected nil after hard delete")
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemListPagination(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
item := &Item{
|
||||
PartNumber: fmt.Sprintf("PAGE-%04d", i),
|
||||
ItemType: "part",
|
||||
Description: fmt.Sprintf("page item %d", i),
|
||||
}
|
||||
if err := repo.Create(ctx, item, nil); err != nil {
|
||||
t.Fatalf("Create #%d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch page of 2 with offset 2
|
||||
items, err := repo.List(ctx, ListOptions{Limit: 2, Offset: 2})
|
||||
if err != nil {
|
||||
t.Fatalf("List: %v", err)
|
||||
}
|
||||
if len(items) != 2 {
|
||||
t.Errorf("expected 2 items, got %d", len(items))
|
||||
}
|
||||
}
|
||||
|
||||
func TestItemListSearch(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
repo.Create(ctx, &Item{PartNumber: "SRCH-001", ItemType: "part", Description: "alpha widget"}, nil)
|
||||
repo.Create(ctx, &Item{PartNumber: "SRCH-002", ItemType: "part", Description: "beta gadget"}, nil)
|
||||
repo.Create(ctx, &Item{PartNumber: "SRCH-003", ItemType: "part", Description: "alpha gizmo"}, nil)
|
||||
|
||||
items, err := repo.List(ctx, ListOptions{Search: "alpha"})
|
||||
if err != nil {
|
||||
t.Fatalf("List: %v", err)
|
||||
}
|
||||
if len(items) != 2 {
|
||||
t.Errorf("expected 2 items matching 'alpha', got %d", len(items))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevisionStatusUpdate(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "STAT-001", ItemType: "part", Description: "status test"}
|
||||
if err := repo.Create(ctx, item, map[string]any{"v": 1}); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
status := "released"
|
||||
if err := repo.UpdateRevisionStatus(ctx, item.ID, 1, &status, nil); err != nil {
|
||||
t.Fatalf("UpdateRevisionStatus: %v", err)
|
||||
}
|
||||
|
||||
rev, err := repo.GetRevision(ctx, item.ID, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("GetRevision: %v", err)
|
||||
}
|
||||
if rev.Status != "released" {
|
||||
t.Errorf("status: got %q, want %q", rev.Status, "released")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevisionLabelsUpdate(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "LBL-001", ItemType: "part", Description: "label test"}
|
||||
if err := repo.Create(ctx, item, nil); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
labels := []string{"prototype", "urgent"}
|
||||
if err := repo.UpdateRevisionStatus(ctx, item.ID, 1, nil, labels); err != nil {
|
||||
t.Fatalf("UpdateRevisionStatus: %v", err)
|
||||
}
|
||||
|
||||
rev, err := repo.GetRevision(ctx, item.ID, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("GetRevision: %v", err)
|
||||
}
|
||||
if len(rev.Labels) != 2 {
|
||||
t.Errorf("labels count: got %d, want 2", len(rev.Labels))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevisionCompare(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "CMP-001", ItemType: "part", Description: "compare test"}
|
||||
if err := repo.Create(ctx, item, map[string]any{"color": "red", "weight": 10}); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
// Rev 2: change color, remove weight, add size
|
||||
repo.CreateRevision(ctx, &Revision{
|
||||
ItemID: item.ID,
|
||||
Properties: map[string]any{"color": "blue", "size": "large"},
|
||||
})
|
||||
|
||||
diff, err := repo.CompareRevisions(ctx, item.ID, 1, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("CompareRevisions: %v", err)
|
||||
}
|
||||
|
||||
if len(diff.Added) == 0 {
|
||||
t.Error("expected added fields (size)")
|
||||
}
|
||||
if len(diff.Removed) == 0 {
|
||||
t.Error("expected removed fields (weight)")
|
||||
}
|
||||
if len(diff.Changed) == 0 {
|
||||
t.Error("expected changed fields (color)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevisionRollback(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
repo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
item := &Item{PartNumber: "RBK-001", ItemType: "part", Description: "rollback test"}
|
||||
if err := repo.Create(ctx, item, map[string]any{"version": "original"}); err != nil {
|
||||
t.Fatalf("Create: %v", err)
|
||||
}
|
||||
|
||||
// Rev 2: change property
|
||||
repo.CreateRevision(ctx, &Revision{
|
||||
ItemID: item.ID,
|
||||
Properties: map[string]any{"version": "modified"},
|
||||
})
|
||||
|
||||
// Rollback to rev 1 — should create rev 3
|
||||
comment := "rollback to rev 1"
|
||||
rev3, err := repo.CreateRevisionFromExisting(ctx, item.ID, 1, comment, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateRevisionFromExisting: %v", err)
|
||||
}
|
||||
if rev3.RevisionNumber != 3 {
|
||||
t.Errorf("revision number: got %d, want 3", rev3.RevisionNumber)
|
||||
}
|
||||
|
||||
// Rev 3 should have rev 1's properties
|
||||
got, err := repo.GetRevision(ctx, item.ID, 3)
|
||||
if err != nil {
|
||||
t.Fatalf("GetRevision: %v", err)
|
||||
}
|
||||
if got.Properties["version"] != "original" {
|
||||
t.Errorf("rolled back version: got %v, want %q", got.Properties["version"], "original")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProjectItemAssociationsByCode(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
projRepo := NewProjectRepository(database)
|
||||
itemRepo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
proj := &Project{Code: "BYTAG", Name: "Tag Project"}
|
||||
projRepo.Create(ctx, proj)
|
||||
|
||||
item := &Item{PartNumber: "TAG-001", ItemType: "part", Description: "taggable"}
|
||||
itemRepo.Create(ctx, item, nil)
|
||||
|
||||
// Tag by code
|
||||
if err := projRepo.AddItemToProjectByCode(ctx, item.ID, "BYTAG"); err != nil {
|
||||
t.Fatalf("AddItemToProjectByCode: %v", err)
|
||||
}
|
||||
|
||||
projects, err := projRepo.GetProjectsForItem(ctx, item.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetProjectsForItem: %v", err)
|
||||
}
|
||||
if len(projects) != 1 {
|
||||
t.Fatalf("expected 1 project, got %d", len(projects))
|
||||
}
|
||||
if projects[0].Code != "BYTAG" {
|
||||
t.Errorf("project code: got %q, want %q", projects[0].Code, "BYTAG")
|
||||
}
|
||||
|
||||
// Untag by code
|
||||
if err := projRepo.RemoveItemFromProjectByCode(ctx, item.ID, "BYTAG"); err != nil {
|
||||
t.Fatalf("RemoveItemFromProjectByCode: %v", err)
|
||||
}
|
||||
|
||||
projects, _ = projRepo.GetProjectsForItem(ctx, item.ID)
|
||||
if len(projects) != 0 {
|
||||
t.Errorf("expected 0 projects after removal, got %d", len(projects))
|
||||
}
|
||||
}
|
||||
|
||||
func TestListByProject(t *testing.T) {
|
||||
database := mustConnectTestDB(t)
|
||||
projRepo := NewProjectRepository(database)
|
||||
itemRepo := NewItemRepository(database)
|
||||
ctx := context.Background()
|
||||
|
||||
proj := &Project{Code: "FILT", Name: "Filter Project"}
|
||||
projRepo.Create(ctx, proj)
|
||||
|
||||
// Create 3 items, tag only 2
|
||||
for i := 0; i < 3; i++ {
|
||||
item := &Item{
|
||||
PartNumber: fmt.Sprintf("FILT-%04d", i),
|
||||
ItemType: "part",
|
||||
Description: fmt.Sprintf("filter item %d", i),
|
||||
}
|
||||
itemRepo.Create(ctx, item, nil)
|
||||
if i < 2 {
|
||||
projRepo.AddItemToProjectByCode(ctx, item.ID, "FILT")
|
||||
}
|
||||
}
|
||||
|
||||
items, err := itemRepo.List(ctx, ListOptions{Project: "FILT"})
|
||||
if err != nil {
|
||||
t.Fatalf("List with project filter: %v", err)
|
||||
}
|
||||
if len(items) != 2 {
|
||||
t.Errorf("expected 2 items in project FILT, got %d", len(items))
|
||||
}
|
||||
}
|
||||
759
internal/db/jobs.go
Normal file
759
internal/db/jobs.go
Normal file
@@ -0,0 +1,759 @@
|
||||
package db
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5"
)
|
||||
|
||||
// Runner represents a registered compute worker.
type Runner struct {
	ID   string
	Name string
	// TokenHash / TokenPrefix: presumably the stored hash and display
	// prefix of the runner's auth token — confirm against the auth layer.
	TokenHash   string
	TokenPrefix string
	// Tags advertise the runner's capabilities; jobs whose runner_tags are
	// a subset of these can be claimed (see JobRepository.ClaimJob).
	Tags          []string
	Status        string
	LastHeartbeat *time.Time // nil until the runner first reports in
	LastJobID     *string    // nil when no job has been assigned yet
	Metadata      map[string]any
	CreatedAt     time.Time
	UpdatedAt     time.Time
}
|
||||
|
||||
// JobDefinitionRecord is a job definition stored in the database.
// Records are unique by Name (see UpsertDefinition's ON CONFLICT (name)).
type JobDefinitionRecord struct {
	ID          string
	Name        string // unique key for upserts
	Version     int
	TriggerType string // used by GetDefinitionsByTrigger to select definitions
	ScopeType   string
	ComputeType string
	// RunnerTags restrict which runners may execute jobs spawned from this
	// definition.
	RunnerTags     []string
	TimeoutSeconds int
	MaxRetries     int
	// Priority: lower values are claimed first (ClaimJob orders priority ASC).
	Priority   int
	Definition map[string]any // free-form JSON payload of the definition
	Enabled    bool           // disabled definitions are skipped by trigger lookups
	CreatedAt  time.Time
	UpdatedAt  time.Time
}
|
||||
|
||||
// Job represents a single compute job instance.
//
// Lifecycle (as driven by JobRepository): pending → claimed → running →
// completed | failed; pending/claimed/running may also become cancelled.
type Job struct {
	ID              string
	JobDefinitionID *string // nil for ad-hoc jobs without a stored definition
	DefinitionName  string
	Status          string
	// Priority: lower values are claimed first (ClaimJob orders priority ASC).
	Priority int
	// Scope: what the job operates on. Both may be nil depending on scope type.
	ItemID        *string
	ProjectID     *string
	ScopeMetadata map[string]any
	// RunnerID is set when a runner claims the job.
	RunnerID *string
	// RunnerTags must be a subset of a runner's tags for it to claim this job.
	RunnerTags []string
	CreatedAt  time.Time
	ClaimedAt  *time.Time
	StartedAt  *time.Time
	CompletedAt *time.Time
	TimeoutSeconds int
	// ExpiresAt is set at claim time to now + TimeoutSeconds (see ClaimJob).
	ExpiresAt *time.Time
	// Progress is 0-100; set to 100 on completion.
	Progress        int
	ProgressMessage *string
	Result          map[string]any // JSON result payload, nil until completed
	ErrorMessage    *string        // set when the job fails
	RetryCount      int
	MaxRetries      int
	CreatedBy       *string
	CancelledBy     *string // who requested cancellation, when cancelled
}
|
||||
|
||||
// JobLogEntry is a single log line for a job.
type JobLogEntry struct {
	ID        string
	JobID     string // the job this line belongs to
	Timestamp time.Time
	Level     string
	Message   string
	Metadata  map[string]any // optional structured context for the line
}
|
||||
|
||||
// JobRepository provides job and runner database operations.
// All methods execute against the shared connection pool held by db.
type JobRepository struct {
	db *DB
}
|
||||
|
||||
// NewJobRepository creates a new job repository.
|
||||
func NewJobRepository(db *DB) *JobRepository {
|
||||
return &JobRepository{db: db}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Job Definitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// UpsertDefinition inserts or updates a job definition record.
//
// Definitions are keyed by name: an existing row with the same name has
// all of its settings (version, trigger, scope, compute, tags, limits,
// payload, enabled flag) overwritten and updated_at bumped. On success
// d.ID, d.CreatedAt and d.UpdatedAt are populated from the database.
func (r *JobRepository) UpsertDefinition(ctx context.Context, d *JobDefinitionRecord) error {
	// The free-form definition payload is stored as JSON.
	defJSON, err := json.Marshal(d.Definition)
	if err != nil {
		return fmt.Errorf("marshaling definition: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO job_definitions (name, version, trigger_type, scope_type, compute_type,
		                             runner_tags, timeout_seconds, max_retries, priority,
		                             definition, enabled)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
		ON CONFLICT (name) DO UPDATE SET
			version = EXCLUDED.version,
			trigger_type = EXCLUDED.trigger_type,
			scope_type = EXCLUDED.scope_type,
			compute_type = EXCLUDED.compute_type,
			runner_tags = EXCLUDED.runner_tags,
			timeout_seconds = EXCLUDED.timeout_seconds,
			max_retries = EXCLUDED.max_retries,
			priority = EXCLUDED.priority,
			definition = EXCLUDED.definition,
			enabled = EXCLUDED.enabled,
			updated_at = now()
		RETURNING id, created_at, updated_at
	`, d.Name, d.Version, d.TriggerType, d.ScopeType, d.ComputeType,
		d.RunnerTags, d.TimeoutSeconds, d.MaxRetries, d.Priority,
		defJSON, d.Enabled,
	).Scan(&d.ID, &d.CreatedAt, &d.UpdatedAt)
	if err != nil {
		return fmt.Errorf("upserting job definition: %w", err)
	}
	return nil
}
|
||||
|
||||
// GetDefinition returns a job definition by name.
|
||||
func (r *JobRepository) GetDefinition(ctx context.Context, name string) (*JobDefinitionRecord, error) {
|
||||
d := &JobDefinitionRecord{}
|
||||
var defJSON []byte
|
||||
err := r.db.pool.QueryRow(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions WHERE name = $1
|
||||
`, name).Scan(
|
||||
&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
|
||||
&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
|
||||
&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
|
||||
)
|
||||
if err == pgx.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying job definition: %w", err)
|
||||
}
|
||||
if defJSON != nil {
|
||||
if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling definition: %w", err)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// ListDefinitions returns all job definitions.
|
||||
func (r *JobRepository) ListDefinitions(ctx context.Context) ([]*JobDefinitionRecord, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions ORDER BY name
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying job definitions: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanJobDefinitions(rows)
|
||||
}
|
||||
|
||||
// GetDefinitionsByTrigger returns all enabled definitions matching a trigger type.
|
||||
func (r *JobRepository) GetDefinitionsByTrigger(ctx context.Context, triggerType string) ([]*JobDefinitionRecord, error) {
|
||||
rows, err := r.db.pool.Query(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions
|
||||
WHERE trigger_type = $1 AND enabled = true
|
||||
ORDER BY priority ASC, name
|
||||
`, triggerType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying definitions by trigger: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
return scanJobDefinitions(rows)
|
||||
}
|
||||
|
||||
// GetDefinitionByID returns a job definition by ID.
|
||||
func (r *JobRepository) GetDefinitionByID(ctx context.Context, id string) (*JobDefinitionRecord, error) {
|
||||
d := &JobDefinitionRecord{}
|
||||
var defJSON []byte
|
||||
err := r.db.pool.QueryRow(ctx, `
|
||||
SELECT id, name, version, trigger_type, scope_type, compute_type,
|
||||
runner_tags, timeout_seconds, max_retries, priority,
|
||||
definition, enabled, created_at, updated_at
|
||||
FROM job_definitions WHERE id = $1
|
||||
`, id).Scan(
|
||||
&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
|
||||
&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
|
||||
&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
|
||||
)
|
||||
if err == pgx.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying job definition by ID: %w", err)
|
||||
}
|
||||
if defJSON != nil {
|
||||
if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling definition: %w", err)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Jobs
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// CreateJob inserts a new job.
//
// The job is always created in status "pending" (any status on j is
// ignored and overwritten). On success j.ID and j.CreatedAt are populated
// from the database and j.Status is set to "pending".
func (r *JobRepository) CreateJob(ctx context.Context, j *Job) error {
	// Scope metadata is stored as a JSON column.
	scopeJSON, err := json.Marshal(j.ScopeMetadata)
	if err != nil {
		return fmt.Errorf("marshaling scope metadata: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO jobs (job_definition_id, definition_name, status, priority,
		                  item_id, project_id, scope_metadata,
		                  runner_tags, timeout_seconds, max_retries, created_by)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
		RETURNING id, created_at
	`, j.JobDefinitionID, j.DefinitionName, "pending", j.Priority,
		j.ItemID, j.ProjectID, scopeJSON,
		j.RunnerTags, j.TimeoutSeconds, j.MaxRetries, j.CreatedBy,
	).Scan(&j.ID, &j.CreatedAt)
	if err != nil {
		return fmt.Errorf("creating job: %w", err)
	}
	// Reflect the status the row was inserted with.
	j.Status = "pending"
	return nil
}
|
||||
|
||||
// GetJob returns a job by ID, or (nil, nil) when no job with that ID
// exists. The scope_metadata and result JSON columns are decoded into
// their map fields when non-NULL.
func (r *JobRepository) GetJob(ctx context.Context, jobID string) (*Job, error) {
	j := &Job{}
	var scopeJSON, resultJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, job_definition_id, definition_name, status, priority,
		       item_id, project_id, scope_metadata, runner_id, runner_tags,
		       created_at, claimed_at, started_at, completed_at,
		       timeout_seconds, expires_at, progress, progress_message,
		       result, error_message, retry_count, max_retries,
		       created_by, cancelled_by
		FROM jobs WHERE id = $1
	`, jobID).Scan(
		&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status, &j.Priority,
		&j.ItemID, &j.ProjectID, &scopeJSON, &j.RunnerID, &j.RunnerTags,
		&j.CreatedAt, &j.ClaimedAt, &j.StartedAt, &j.CompletedAt,
		&j.TimeoutSeconds, &j.ExpiresAt, &j.Progress, &j.ProgressMessage,
		&resultJSON, &j.ErrorMessage, &j.RetryCount, &j.MaxRetries,
		&j.CreatedBy, &j.CancelledBy,
	)
	// NOTE(review): direct sentinel comparison; errors.Is(err, pgx.ErrNoRows)
	// would also catch a wrapped sentinel — consider aligning with the
	// project's error-handling convention.
	if err == pgx.ErrNoRows {
		// Absence is not an error; callers check for a nil job.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying job: %w", err)
	}
	if scopeJSON != nil {
		if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
			return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
		}
	}
	if resultJSON != nil {
		if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
			return nil, fmt.Errorf("unmarshaling result: %w", err)
		}
	}
	return j, nil
}
|
||||
|
||||
// ListJobs returns jobs matching optional filters, newest first.
//
// Empty status/itemID strings mean "no filter"; limit/offset <= 0 mean
// "unbounded"/"from the start". The WHERE clause is assembled dynamically
// but all values are passed as positional parameters, so no user input is
// ever interpolated into the SQL text.
func (r *JobRepository) ListJobs(ctx context.Context, status, itemID string, limit, offset int) ([]*Job, error) {
	query := `
		SELECT id, job_definition_id, definition_name, status, priority,
		       item_id, project_id, scope_metadata, runner_id, runner_tags,
		       created_at, claimed_at, started_at, completed_at,
		       timeout_seconds, expires_at, progress, progress_message,
		       result, error_message, retry_count, max_retries,
		       created_by, cancelled_by
		FROM jobs WHERE 1=1`
	args := []any{}
	// argN tracks the next positional placeholder number ($1, $2, ...).
	argN := 1

	if status != "" {
		query += fmt.Sprintf(" AND status = $%d", argN)
		args = append(args, status)
		argN++
	}
	if itemID != "" {
		query += fmt.Sprintf(" AND item_id = $%d", argN)
		args = append(args, itemID)
		argN++
	}

	query += " ORDER BY created_at DESC"

	if limit > 0 {
		query += fmt.Sprintf(" LIMIT $%d", argN)
		args = append(args, limit)
		argN++
	}
	if offset > 0 {
		query += fmt.Sprintf(" OFFSET $%d", argN)
		args = append(args, offset)
	}

	rows, err := r.db.pool.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("querying jobs: %w", err)
	}
	defer rows.Close()
	return scanJobs(rows)
}
|
||||
|
||||
// ClaimJob atomically claims the next available job matching the runner's tags.
// Uses SELECT FOR UPDATE SKIP LOCKED for exactly-once delivery.
//
// A job is claimable when it is pending and its runner_tags are a subset
// (<@) of the runner's offered tags; candidates are taken lowest priority
// value first, then oldest first. SKIP LOCKED lets concurrent runners pass
// over rows another transaction is claiming instead of blocking. On claim
// the job moves to 'claimed', records the runner and claim time, and sets
// expires_at to now + its timeout. Returns (nil, nil) when nothing is
// claimable.
func (r *JobRepository) ClaimJob(ctx context.Context, runnerID string, tags []string) (*Job, error) {
	j := &Job{}
	var scopeJSON, resultJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		WITH claimable AS (
			SELECT id FROM jobs
			WHERE status = 'pending' AND runner_tags <@ $2::text[]
			ORDER BY priority ASC, created_at ASC
			LIMIT 1
			FOR UPDATE SKIP LOCKED
		)
		UPDATE jobs SET
			status = 'claimed',
			runner_id = $1,
			claimed_at = now(),
			expires_at = now() + (timeout_seconds || ' seconds')::interval
		FROM claimable
		WHERE jobs.id = claimable.id
		RETURNING jobs.id, jobs.job_definition_id, jobs.definition_name, jobs.status,
		          jobs.priority, jobs.item_id, jobs.project_id, jobs.scope_metadata,
		          jobs.runner_id, jobs.runner_tags, jobs.created_at, jobs.claimed_at,
		          jobs.started_at, jobs.completed_at, jobs.timeout_seconds, jobs.expires_at,
		          jobs.progress, jobs.progress_message, jobs.result, jobs.error_message,
		          jobs.retry_count, jobs.max_retries, jobs.created_by, jobs.cancelled_by
	`, runnerID, tags).Scan(
		&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status,
		&j.Priority, &j.ItemID, &j.ProjectID, &scopeJSON,
		&j.RunnerID, &j.RunnerTags, &j.CreatedAt, &j.ClaimedAt,
		&j.StartedAt, &j.CompletedAt, &j.TimeoutSeconds, &j.ExpiresAt,
		&j.Progress, &j.ProgressMessage, &resultJSON, &j.ErrorMessage,
		&j.RetryCount, &j.MaxRetries, &j.CreatedBy, &j.CancelledBy,
	)
	// NOTE(review): direct sentinel comparison; errors.Is(err, pgx.ErrNoRows)
	// would also catch a wrapped sentinel.
	if err == pgx.ErrNoRows {
		// No claimable job right now; not an error.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("claiming job: %w", err)
	}
	if scopeJSON != nil {
		if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
			return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
		}
	}
	if resultJSON != nil {
		if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
			return nil, fmt.Errorf("unmarshaling result: %w", err)
		}
	}
	return j, nil
}
|
||||
|
||||
// StartJob transitions a claimed job to running.
|
||||
func (r *JobRepository) StartJob(ctx context.Context, jobID, runnerID string) error {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET status = 'running', started_at = now()
|
||||
WHERE id = $1 AND runner_id = $2 AND status = 'claimed'
|
||||
`, jobID, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("starting job: %w", err)
|
||||
}
|
||||
if result.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not claimable by runner %s or not in claimed state", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProgress updates a running job's progress.
|
||||
func (r *JobRepository) UpdateProgress(ctx context.Context, jobID, runnerID string, progress int, message string) error {
|
||||
var msg *string
|
||||
if message != "" {
|
||||
msg = &message
|
||||
}
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET progress = $3, progress_message = $4
|
||||
WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
|
||||
`, jobID, runnerID, progress, msg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating progress: %w", err)
|
||||
}
|
||||
if result.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CompleteJob marks a job as completed with optional result data.
|
||||
func (r *JobRepository) CompleteJob(ctx context.Context, jobID, runnerID string, resultData map[string]any) error {
|
||||
var resultJSON []byte
|
||||
var err error
|
||||
if resultData != nil {
|
||||
resultJSON, err = json.Marshal(resultData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling result: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'completed',
|
||||
progress = 100,
|
||||
result = $3,
|
||||
completed_at = now()
|
||||
WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
|
||||
`, jobID, runnerID, resultJSON)
|
||||
if err != nil {
|
||||
return fmt.Errorf("completing job: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FailJob marks a job as failed with an error message.
|
||||
func (r *JobRepository) FailJob(ctx context.Context, jobID, runnerID string, errMsg string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'failed',
|
||||
error_message = $3,
|
||||
completed_at = now()
|
||||
WHERE id = $1 AND runner_id = $2 AND status IN ('claimed', 'running')
|
||||
`, jobID, runnerID, errMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failing job: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not owned by runner %s or not active", jobID, runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelJob cancels a pending or active job.
|
||||
func (r *JobRepository) CancelJob(ctx context.Context, jobID string, cancelledBy string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'cancelled',
|
||||
cancelled_by = $2,
|
||||
completed_at = now()
|
||||
WHERE id = $1 AND status IN ('pending', 'claimed', 'running')
|
||||
`, jobID, cancelledBy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cancelling job: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("job %s not cancellable", jobID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TimeoutExpiredJobs marks expired claimed/running jobs as failed.
|
||||
// Returns the number of jobs timed out.
|
||||
func (r *JobRepository) TimeoutExpiredJobs(ctx context.Context) (int64, error) {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE jobs SET
|
||||
status = 'failed',
|
||||
error_message = 'job timed out',
|
||||
completed_at = now()
|
||||
WHERE status IN ('claimed', 'running')
|
||||
AND expires_at IS NOT NULL
|
||||
AND expires_at < now()
|
||||
`)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("timing out expired jobs: %w", err)
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Job Log
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// AppendLog adds a log entry to a job.
//
// entry.Metadata is serialized to JSON for storage; on success the
// database-assigned ID and Timestamp are written back into entry.
func (r *JobRepository) AppendLog(ctx context.Context, entry *JobLogEntry) error {
	metaJSON, err := json.Marshal(entry.Metadata)
	if err != nil {
		return fmt.Errorf("marshaling log metadata: %w", err)
	}

	// RETURNING lets us capture the generated id/timestamp in one round trip.
	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO job_log (job_id, level, message, metadata)
		VALUES ($1, $2, $3, $4)
		RETURNING id, timestamp
	`, entry.JobID, entry.Level, entry.Message, metaJSON,
	).Scan(&entry.ID, &entry.Timestamp)
	if err != nil {
		return fmt.Errorf("appending job log: %w", err)
	}
	return nil
}
|
||||
|
||||
// GetJobLogs returns all log entries for a job, oldest first.
//
// Returns a nil slice (not an error) when the job has no log entries.
func (r *JobRepository) GetJobLogs(ctx context.Context, jobID string) ([]*JobLogEntry, error) {
	rows, err := r.db.pool.Query(ctx, `
		SELECT id, job_id, timestamp, level, message, metadata
		FROM job_log WHERE job_id = $1 ORDER BY timestamp ASC
	`, jobID)
	if err != nil {
		return nil, fmt.Errorf("querying job logs: %w", err)
	}
	defer rows.Close()

	var entries []*JobLogEntry
	for rows.Next() {
		e := &JobLogEntry{}
		var metaJSON []byte
		// Scan order must match the SELECT column list above.
		if err := rows.Scan(&e.ID, &e.JobID, &e.Timestamp, &e.Level, &e.Message, &metaJSON); err != nil {
			return nil, fmt.Errorf("scanning job log: %w", err)
		}
		// NULL metadata is left as the zero value rather than unmarshaled.
		if metaJSON != nil {
			if err := json.Unmarshal(metaJSON, &e.Metadata); err != nil {
				return nil, fmt.Errorf("unmarshaling log metadata: %w", err)
			}
		}
		entries = append(entries, e)
	}
	return entries, rows.Err()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Runners
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// RegisterRunner creates a new runner record.
//
// The runner row is inserted with status 'offline'. On success the
// database-assigned ID and created/updated timestamps are written back into
// runner, and runner.Status is set to mirror the stored value.
func (r *JobRepository) RegisterRunner(ctx context.Context, runner *Runner) error {
	metaJSON, err := json.Marshal(runner.Metadata)
	if err != nil {
		return fmt.Errorf("marshaling runner metadata: %w", err)
	}

	err = r.db.pool.QueryRow(ctx, `
		INSERT INTO runners (name, token_hash, token_prefix, tags, status, metadata)
		VALUES ($1, $2, $3, $4, 'offline', $5)
		RETURNING id, created_at, updated_at
	`, runner.Name, runner.TokenHash, runner.TokenPrefix, runner.Tags, metaJSON,
	).Scan(&runner.ID, &runner.CreatedAt, &runner.UpdatedAt)
	if err != nil {
		return fmt.Errorf("registering runner: %w", err)
	}
	// Keep the in-memory struct consistent with the row we just inserted.
	runner.Status = "offline"
	return nil
}
|
||||
|
||||
// GetRunnerByToken looks up a runner by token hash.
//
// Returns (nil, nil) when no runner has the given token hash, so callers
// must nil-check before use.
func (r *JobRepository) GetRunnerByToken(ctx context.Context, tokenHash string) (*Runner, error) {
	runner := &Runner{}
	var metaJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
			last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners WHERE token_hash = $1
	`, tokenHash).Scan(
		&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
		&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
		&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
	)
	// NOTE(review): direct == comparison with pgx.ErrNoRows works for
	// QueryRow.Scan today; errors.Is would be more future-proof — confirm
	// before changing.
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying runner by token: %w", err)
	}
	// NULL metadata leaves runner.Metadata as its zero value.
	if metaJSON != nil {
		if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
		}
	}
	return runner, nil
}
|
||||
|
||||
// GetRunner returns a runner by ID.
//
// Returns (nil, nil) when the runner does not exist, so callers must
// nil-check before use.
func (r *JobRepository) GetRunner(ctx context.Context, runnerID string) (*Runner, error) {
	runner := &Runner{}
	var metaJSON []byte
	err := r.db.pool.QueryRow(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
			last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners WHERE id = $1
	`, runnerID).Scan(
		&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
		&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
		&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
	)
	if err == pgx.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("querying runner: %w", err)
	}
	// NULL metadata leaves runner.Metadata as its zero value.
	if metaJSON != nil {
		if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
			return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
		}
	}
	return runner, nil
}
|
||||
|
||||
// Heartbeat updates a runner's heartbeat timestamp and sets status to online.
|
||||
func (r *JobRepository) Heartbeat(ctx context.Context, runnerID string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE runners SET
|
||||
status = 'online',
|
||||
last_heartbeat = now(),
|
||||
updated_at = now()
|
||||
WHERE id = $1
|
||||
`, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating heartbeat: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("runner %s not found", runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListRunners returns all registered runners, ordered by name.
func (r *JobRepository) ListRunners(ctx context.Context) ([]*Runner, error) {
	rows, err := r.db.pool.Query(ctx, `
		SELECT id, name, token_hash, token_prefix, tags, status,
			last_heartbeat, last_job_id, metadata, created_at, updated_at
		FROM runners ORDER BY name
	`)
	if err != nil {
		return nil, fmt.Errorf("querying runners: %w", err)
	}
	defer rows.Close()

	var runners []*Runner
	for rows.Next() {
		runner := &Runner{}
		var metaJSON []byte
		// Scan order must match the SELECT column list above.
		if err := rows.Scan(
			&runner.ID, &runner.Name, &runner.TokenHash, &runner.TokenPrefix,
			&runner.Tags, &runner.Status, &runner.LastHeartbeat, &runner.LastJobID,
			&metaJSON, &runner.CreatedAt, &runner.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scanning runner: %w", err)
		}
		// NULL metadata leaves runner.Metadata as its zero value.
		if metaJSON != nil {
			if err := json.Unmarshal(metaJSON, &runner.Metadata); err != nil {
				return nil, fmt.Errorf("unmarshaling runner metadata: %w", err)
			}
		}
		runners = append(runners, runner)
	}
	return runners, rows.Err()
}
|
||||
|
||||
// DeleteRunner removes a runner by ID.
|
||||
func (r *JobRepository) DeleteRunner(ctx context.Context, runnerID string) error {
|
||||
res, err := r.db.pool.Exec(ctx, `DELETE FROM runners WHERE id = $1`, runnerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting runner: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("runner %s not found", runnerID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpireStaleRunners marks runners with no recent heartbeat as offline.
|
||||
func (r *JobRepository) ExpireStaleRunners(ctx context.Context, timeout time.Duration) (int64, error) {
|
||||
result, err := r.db.pool.Exec(ctx, `
|
||||
UPDATE runners SET status = 'offline', updated_at = now()
|
||||
WHERE status = 'online'
|
||||
AND last_heartbeat < now() - $1::interval
|
||||
`, timeout.String())
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("expiring stale runners: %w", err)
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// scanJobDefinitions drains rows into JobDefinitionRecord values.
//
// The scan column order must match the SELECT lists used by callers.
// A NULL definition column leaves d.Definition as its zero value.
func scanJobDefinitions(rows pgx.Rows) ([]*JobDefinitionRecord, error) {
	var defs []*JobDefinitionRecord
	for rows.Next() {
		d := &JobDefinitionRecord{}
		var defJSON []byte
		if err := rows.Scan(
			&d.ID, &d.Name, &d.Version, &d.TriggerType, &d.ScopeType, &d.ComputeType,
			&d.RunnerTags, &d.TimeoutSeconds, &d.MaxRetries, &d.Priority,
			&defJSON, &d.Enabled, &d.CreatedAt, &d.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scanning job definition: %w", err)
		}
		if defJSON != nil {
			if err := json.Unmarshal(defJSON, &d.Definition); err != nil {
				return nil, fmt.Errorf("unmarshaling definition: %w", err)
			}
		}
		defs = append(defs, d)
	}
	return defs, rows.Err()
}
|
||||
|
||||
// scanJobs drains rows into Job values.
//
// The scan column order must match the SELECT lists used by callers.
// NULL scope_metadata/result columns leave the corresponding fields as
// their zero values.
func scanJobs(rows pgx.Rows) ([]*Job, error) {
	var jobs []*Job
	for rows.Next() {
		j := &Job{}
		var scopeJSON, resultJSON []byte
		if err := rows.Scan(
			&j.ID, &j.JobDefinitionID, &j.DefinitionName, &j.Status, &j.Priority,
			&j.ItemID, &j.ProjectID, &scopeJSON, &j.RunnerID, &j.RunnerTags,
			&j.CreatedAt, &j.ClaimedAt, &j.StartedAt, &j.CompletedAt,
			&j.TimeoutSeconds, &j.ExpiresAt, &j.Progress, &j.ProgressMessage,
			&resultJSON, &j.ErrorMessage, &j.RetryCount, &j.MaxRetries,
			&j.CreatedBy, &j.CancelledBy,
		); err != nil {
			return nil, fmt.Errorf("scanning job: %w", err)
		}
		if scopeJSON != nil {
			if err := json.Unmarshal(scopeJSON, &j.ScopeMetadata); err != nil {
				return nil, fmt.Errorf("unmarshaling scope metadata: %w", err)
			}
		}
		if resultJSON != nil {
			if err := json.Unmarshal(resultJSON, &j.Result); err != nil {
				return nil, fmt.Errorf("unmarshaling result: %w", err)
			}
		}
		jobs = append(jobs, j)
	}
	return jobs, rows.Err()
}
|
||||
105
internal/db/settings.go
Normal file
105
internal/db/settings.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// SettingsRepository provides access to module_state and settings_overrides tables.
type SettingsRepository struct {
	db *DB // shared database handle; queries go through db.pool
}

// NewSettingsRepository creates a new SettingsRepository.
func NewSettingsRepository(db *DB) *SettingsRepository {
	return &SettingsRepository{db: db}
}
|
||||
|
||||
// GetModuleStates returns all module enabled/disabled states from the database.
|
||||
func (r *SettingsRepository) GetModuleStates(ctx context.Context) (map[string]bool, error) {
|
||||
rows, err := r.db.pool.Query(ctx,
|
||||
`SELECT module_id, enabled FROM module_state`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying module states: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
states := make(map[string]bool)
|
||||
for rows.Next() {
|
||||
var id string
|
||||
var enabled bool
|
||||
if err := rows.Scan(&id, &enabled); err != nil {
|
||||
return nil, fmt.Errorf("scanning module state: %w", err)
|
||||
}
|
||||
states[id] = enabled
|
||||
}
|
||||
return states, rows.Err()
|
||||
}
|
||||
|
||||
// SetModuleState persists a module's enabled state. Uses upsert semantics.
|
||||
func (r *SettingsRepository) SetModuleState(ctx context.Context, moduleID string, enabled bool, updatedBy string) error {
|
||||
_, err := r.db.pool.Exec(ctx,
|
||||
`INSERT INTO module_state (module_id, enabled, updated_by, updated_at)
|
||||
VALUES ($1, $2, $3, now())
|
||||
ON CONFLICT (module_id) DO UPDATE
|
||||
SET enabled = EXCLUDED.enabled,
|
||||
updated_by = EXCLUDED.updated_by,
|
||||
updated_at = now()`,
|
||||
moduleID, enabled, updatedBy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting module state: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOverrides returns all settings overrides from the database.
|
||||
func (r *SettingsRepository) GetOverrides(ctx context.Context) (map[string]json.RawMessage, error) {
|
||||
rows, err := r.db.pool.Query(ctx,
|
||||
`SELECT key, value FROM settings_overrides`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("querying settings overrides: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
overrides := make(map[string]json.RawMessage)
|
||||
for rows.Next() {
|
||||
var key string
|
||||
var value json.RawMessage
|
||||
if err := rows.Scan(&key, &value); err != nil {
|
||||
return nil, fmt.Errorf("scanning settings override: %w", err)
|
||||
}
|
||||
overrides[key] = value
|
||||
}
|
||||
return overrides, rows.Err()
|
||||
}
|
||||
|
||||
// SetOverride persists a settings override. Uses upsert semantics.
|
||||
func (r *SettingsRepository) SetOverride(ctx context.Context, key string, value any, updatedBy string) error {
|
||||
jsonVal, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling override value: %w", err)
|
||||
}
|
||||
_, err = r.db.pool.Exec(ctx,
|
||||
`INSERT INTO settings_overrides (key, value, updated_by, updated_at)
|
||||
VALUES ($1, $2, $3, now())
|
||||
ON CONFLICT (key) DO UPDATE
|
||||
SET value = EXCLUDED.value,
|
||||
updated_by = EXCLUDED.updated_by,
|
||||
updated_at = now()`,
|
||||
key, jsonVal, updatedBy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting override: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteOverride removes a settings override.
|
||||
func (r *SettingsRepository) DeleteOverride(ctx context.Context, key string) error {
|
||||
_, err := r.db.pool.Exec(ctx,
|
||||
`DELETE FROM settings_overrides WHERE key = $1`, key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting override: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
166
internal/jobdef/jobdef.go
Normal file
166
internal/jobdef/jobdef.go
Normal file
@@ -0,0 +1,166 @@
|
||||
// Package jobdef handles YAML job definition parsing and validation.
|
||||
package jobdef
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Definition represents a compute job definition loaded from YAML.
//
// Timeout, MaxRetries, Priority, and Version receive defaults in Load when
// unset or non-positive (600, 1, 100, and 1 respectively).
type Definition struct {
	Name        string        `yaml:"name" json:"name"`
	Version     int           `yaml:"version" json:"version"`
	Description string        `yaml:"description" json:"description"`
	Trigger     TriggerConfig `yaml:"trigger" json:"trigger"`
	Scope       ScopeConfig   `yaml:"scope" json:"scope"`
	Compute     ComputeConfig `yaml:"compute" json:"compute"`
	Runner      RunnerConfig  `yaml:"runner" json:"runner"`
	Timeout     int           `yaml:"timeout" json:"timeout"` // presumably seconds — TODO confirm against job_definitions.timeout_seconds
	MaxRetries  int           `yaml:"max_retries" json:"max_retries"`
	Priority    int           `yaml:"priority" json:"priority"`
}

// TriggerConfig describes when a job is created.
type TriggerConfig struct {
	Type   string            `yaml:"type" json:"type"` // one of validTriggerTypes
	Filter map[string]string `yaml:"filter,omitempty" json:"filter,omitempty"`
}

// ScopeConfig describes what a job operates on.
type ScopeConfig struct {
	Type string `yaml:"type" json:"type"` // one of validScopeTypes
}

// ComputeConfig describes the computation to perform.
type ComputeConfig struct {
	Type    string         `yaml:"type" json:"type"` // one of validComputeTypes
	Command string         `yaml:"command" json:"command"`
	Args    map[string]any `yaml:"args,omitempty" json:"args,omitempty"`
}

// RunnerConfig describes runner requirements.
type RunnerConfig struct {
	Tags []string `yaml:"tags" json:"tags"`
}

// DefinitionFile wraps a definition for YAML parsing.
// Job definition files nest everything under a top-level "job:" key.
type DefinitionFile struct {
	Job Definition `yaml:"job"`
}

// validTriggerTypes enumerates the accepted trigger.type values.
var validTriggerTypes = map[string]bool{
	"revision_created": true,
	"bom_changed":      true,
	"manual":           true,
	"schedule":         true,
}

// validScopeTypes enumerates the accepted scope.type values.
var validScopeTypes = map[string]bool{
	"item":     true,
	"assembly": true,
	"project":  true,
}

// validComputeTypes enumerates the accepted compute.type values.
var validComputeTypes = map[string]bool{
	"validate": true,
	"rebuild":  true,
	"diff":     true,
	"export":   true,
	"custom":   true,
}
|
||||
|
||||
// Load reads a job definition from a YAML file.
|
||||
func Load(path string) (*Definition, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading job definition file: %w", err)
|
||||
}
|
||||
|
||||
var df DefinitionFile
|
||||
if err := yaml.Unmarshal(data, &df); err != nil {
|
||||
return nil, fmt.Errorf("parsing job definition YAML: %w", err)
|
||||
}
|
||||
|
||||
def := &df.Job
|
||||
|
||||
// Apply defaults
|
||||
if def.Timeout <= 0 {
|
||||
def.Timeout = 600
|
||||
}
|
||||
if def.MaxRetries <= 0 {
|
||||
def.MaxRetries = 1
|
||||
}
|
||||
if def.Priority <= 0 {
|
||||
def.Priority = 100
|
||||
}
|
||||
if def.Version <= 0 {
|
||||
def.Version = 1
|
||||
}
|
||||
|
||||
if err := def.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("validating %s: %w", path, err)
|
||||
}
|
||||
|
||||
return def, nil
|
||||
}
|
||||
|
||||
// LoadAll reads all job definitions from a directory.
|
||||
func LoadAll(dir string) (map[string]*Definition, error) {
|
||||
defs := make(map[string]*Definition)
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading job definitions directory: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if !strings.HasSuffix(entry.Name(), ".yaml") && !strings.HasSuffix(entry.Name(), ".yml") {
|
||||
continue
|
||||
}
|
||||
|
||||
path := filepath.Join(dir, entry.Name())
|
||||
def, err := Load(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading %s: %w", entry.Name(), err)
|
||||
}
|
||||
defs[def.Name] = def
|
||||
}
|
||||
|
||||
return defs, nil
|
||||
}
|
||||
|
||||
// Validate checks that the definition is well-formed.
|
||||
func (d *Definition) Validate() error {
|
||||
if d.Name == "" {
|
||||
return fmt.Errorf("job definition name is required")
|
||||
}
|
||||
if d.Trigger.Type == "" {
|
||||
return fmt.Errorf("trigger type is required")
|
||||
}
|
||||
if !validTriggerTypes[d.Trigger.Type] {
|
||||
return fmt.Errorf("invalid trigger type %q", d.Trigger.Type)
|
||||
}
|
||||
if d.Scope.Type == "" {
|
||||
return fmt.Errorf("scope type is required")
|
||||
}
|
||||
if !validScopeTypes[d.Scope.Type] {
|
||||
return fmt.Errorf("invalid scope type %q", d.Scope.Type)
|
||||
}
|
||||
if d.Compute.Type == "" {
|
||||
return fmt.Errorf("compute type is required")
|
||||
}
|
||||
if !validComputeTypes[d.Compute.Type] {
|
||||
return fmt.Errorf("invalid compute type %q", d.Compute.Type)
|
||||
}
|
||||
if d.Compute.Command == "" {
|
||||
return fmt.Errorf("compute command is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
328
internal/jobdef/jobdef_test.go
Normal file
328
internal/jobdef/jobdef_test.go
Normal file
@@ -0,0 +1,328 @@
|
||||
package jobdef
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestLoadValid exercises Load end to end with a fully-specified definition
// and checks every field survives the YAML round trip.
func TestLoadValid(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: test-job
  version: 1
  description: "A test job"
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: validate
    command: create-validate
  runner:
    tags: [create]
  timeout: 300
  max_retries: 2
  priority: 50
`
	path := filepath.Join(dir, "test-job.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	def, err := Load(path)
	if err != nil {
		t.Fatalf("Load: %v", err)
	}

	if def.Name != "test-job" {
		t.Errorf("name = %q, want %q", def.Name, "test-job")
	}
	if def.Version != 1 {
		t.Errorf("version = %d, want 1", def.Version)
	}
	if def.Trigger.Type != "manual" {
		t.Errorf("trigger type = %q, want %q", def.Trigger.Type, "manual")
	}
	if def.Scope.Type != "item" {
		t.Errorf("scope type = %q, want %q", def.Scope.Type, "item")
	}
	if def.Compute.Type != "validate" {
		t.Errorf("compute type = %q, want %q", def.Compute.Type, "validate")
	}
	if def.Compute.Command != "create-validate" {
		t.Errorf("compute command = %q, want %q", def.Compute.Command, "create-validate")
	}
	if len(def.Runner.Tags) != 1 || def.Runner.Tags[0] != "create" {
		t.Errorf("runner tags = %v, want [create]", def.Runner.Tags)
	}
	if def.Timeout != 300 {
		t.Errorf("timeout = %d, want 300", def.Timeout)
	}
	if def.MaxRetries != 2 {
		t.Errorf("max_retries = %d, want 2", def.MaxRetries)
	}
	if def.Priority != 50 {
		t.Errorf("priority = %d, want 50", def.Priority)
	}
}

// TestLoadDefaults checks that Load fills in the documented defaults
// (timeout 600, max_retries 1, priority 100, version 1) when the YAML
// omits those fields.
func TestLoadDefaults(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: minimal
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: custom
    command: do-something
`
	path := filepath.Join(dir, "minimal.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	def, err := Load(path)
	if err != nil {
		t.Fatalf("Load: %v", err)
	}

	if def.Timeout != 600 {
		t.Errorf("default timeout = %d, want 600", def.Timeout)
	}
	if def.MaxRetries != 1 {
		t.Errorf("default max_retries = %d, want 1", def.MaxRetries)
	}
	if def.Priority != 100 {
		t.Errorf("default priority = %d, want 100", def.Priority)
	}
	if def.Version != 1 {
		t.Errorf("default version = %d, want 1", def.Version)
	}
}
|
||||
|
||||
// TestLoadInvalidTriggerType checks that Load rejects a trigger type not in
// validTriggerTypes.
func TestLoadInvalidTriggerType(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: bad-trigger
  trigger:
    type: invalid_trigger
  scope:
    type: item
  compute:
    type: validate
    command: create-validate
`
	path := filepath.Join(dir, "bad.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	_, err := Load(path)
	if err == nil {
		t.Fatal("expected error for invalid trigger type")
	}
}

// TestLoadMissingName checks that Load rejects a definition without a name.
func TestLoadMissingName(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: validate
    command: create-validate
`
	path := filepath.Join(dir, "no-name.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	_, err := Load(path)
	if err == nil {
		t.Fatal("expected error for missing name")
	}
}

// TestLoadMissingCommand checks that Load rejects a definition whose compute
// section has no command.
func TestLoadMissingCommand(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: no-command
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: validate
`
	path := filepath.Join(dir, "no-cmd.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	_, err := Load(path)
	if err == nil {
		t.Fatal("expected error for missing command")
	}
}
|
||||
|
||||
// TestLoadAllDirectory checks that LoadAll picks up both .yaml and .yml
// files, keys the result by definition name, and ignores non-YAML files.
func TestLoadAllDirectory(t *testing.T) {
	dir := t.TempDir()

	job1 := `
job:
  name: job-one
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: validate
    command: create-validate
`
	job2 := `
job:
  name: job-two
  trigger:
    type: revision_created
  scope:
    type: assembly
  compute:
    type: export
    command: create-export
`
	if err := os.WriteFile(filepath.Join(dir, "one.yaml"), []byte(job1), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "two.yml"), []byte(job2), 0644); err != nil {
		t.Fatal(err)
	}
	// Non-YAML file should be ignored
	if err := os.WriteFile(filepath.Join(dir, "readme.txt"), []byte("ignore me"), 0644); err != nil {
		t.Fatal(err)
	}

	defs, err := LoadAll(dir)
	if err != nil {
		t.Fatalf("LoadAll: %v", err)
	}

	if len(defs) != 2 {
		t.Fatalf("loaded %d definitions, want 2", len(defs))
	}
	if _, ok := defs["job-one"]; !ok {
		t.Error("job-one not found")
	}
	if _, ok := defs["job-two"]; !ok {
		t.Error("job-two not found")
	}
}

// TestLoadAllEmptyDirectory checks that LoadAll on an empty directory
// succeeds with zero definitions rather than erroring.
func TestLoadAllEmptyDirectory(t *testing.T) {
	dir := t.TempDir()

	defs, err := LoadAll(dir)
	if err != nil {
		t.Fatalf("LoadAll: %v", err)
	}
	if len(defs) != 0 {
		t.Errorf("loaded %d definitions from empty dir, want 0", len(defs))
	}
}
|
||||
|
||||
// TestLoadWithFilter checks that the optional trigger.filter map survives
// YAML parsing.
func TestLoadWithFilter(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: filtered-job
  trigger:
    type: revision_created
    filter:
      item_type: assembly
  scope:
    type: assembly
  compute:
    type: validate
    command: create-validate
`
	path := filepath.Join(dir, "filtered.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	def, err := Load(path)
	if err != nil {
		t.Fatalf("Load: %v", err)
	}

	if def.Trigger.Filter["item_type"] != "assembly" {
		t.Errorf("filter item_type = %q, want %q", def.Trigger.Filter["item_type"], "assembly")
	}
}

// TestLoadWithArgs checks that compute.args values of mixed YAML types
// (string, bool) survive parsing into the map[string]any field.
func TestLoadWithArgs(t *testing.T) {
	dir := t.TempDir()
	content := `
job:
  name: args-job
  trigger:
    type: manual
  scope:
    type: item
  compute:
    type: export
    command: create-export
    args:
      format: step
      include_mesh: true
`
	path := filepath.Join(dir, "args.yaml")
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("writing test file: %v", err)
	}

	def, err := Load(path)
	if err != nil {
		t.Fatalf("Load: %v", err)
	}

	if def.Compute.Args["format"] != "step" {
		t.Errorf("args format = %v, want %q", def.Compute.Args["format"], "step")
	}
	if def.Compute.Args["include_mesh"] != true {
		t.Errorf("args include_mesh = %v, want true", def.Compute.Args["include_mesh"])
	}
}
|
||||
|
||||
// TestValidateInvalidScopeType checks Validate directly (no YAML) with a
// scope type not in validScopeTypes.
func TestValidateInvalidScopeType(t *testing.T) {
	d := &Definition{
		Name:    "test",
		Trigger: TriggerConfig{Type: "manual"},
		Scope:   ScopeConfig{Type: "galaxy"},
		Compute: ComputeConfig{Type: "validate", Command: "create-validate"},
	}
	if err := d.Validate(); err == nil {
		t.Fatal("expected error for invalid scope type")
	}
}

// TestValidateInvalidComputeType checks Validate directly with a compute
// type not in validComputeTypes.
func TestValidateInvalidComputeType(t *testing.T) {
	d := &Definition{
		Name:    "test",
		Trigger: TriggerConfig{Type: "manual"},
		Scope:   ScopeConfig{Type: "item"},
		Compute: ComputeConfig{Type: "teleport", Command: "beam-up"},
	}
	if err := d.Validate(); err == nil {
		t.Fatal("expected error for invalid compute type")
	}
}
|
||||
84
internal/modules/loader.go
Normal file
84
internal/modules/loader.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
)
|
||||
|
||||
// LoadState applies module state from config YAML and database overrides.
|
||||
//
|
||||
// Precedence (highest wins):
|
||||
// 1. Database module_state table
|
||||
// 2. YAML modules.* toggles
|
||||
// 3. Backward-compat YAML fields (auth.enabled, odoo.enabled)
|
||||
// 4. Module defaults (set by NewRegistry)
|
||||
func LoadState(r *Registry, cfg *config.Config, pool *pgxpool.Pool) error {
|
||||
// Step 1: Apply backward-compat top-level YAML fields.
|
||||
// auth.enabled and odoo.enabled existed before the modules section.
|
||||
// Only apply if the new modules.* section doesn't override them.
|
||||
if cfg.Modules.Auth == nil {
|
||||
r.setEnabledUnchecked(Auth, cfg.Auth.Enabled)
|
||||
}
|
||||
if cfg.Modules.Odoo == nil {
|
||||
r.setEnabledUnchecked(Odoo, cfg.Odoo.Enabled)
|
||||
}
|
||||
|
||||
// Step 2: Apply explicit modules.* YAML toggles (override defaults + compat).
|
||||
applyToggle(r, Auth, cfg.Modules.Auth)
|
||||
applyToggle(r, Projects, cfg.Modules.Projects)
|
||||
applyToggle(r, Audit, cfg.Modules.Audit)
|
||||
applyToggle(r, Odoo, cfg.Modules.Odoo)
|
||||
applyToggle(r, FreeCAD, cfg.Modules.FreeCAD)
|
||||
applyToggle(r, Jobs, cfg.Modules.Jobs)
|
||||
applyToggle(r, DAG, cfg.Modules.DAG)
|
||||
|
||||
// Step 3: Apply database overrides (highest precedence).
|
||||
if pool != nil {
|
||||
if err := loadFromDB(r, pool); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Validate the final state.
|
||||
return r.ValidateDependencies()
|
||||
}
|
||||
|
||||
// applyToggle sets a module's state from a YAML ModuleToggle if present.
|
||||
func applyToggle(r *Registry, id string, toggle *config.ModuleToggle) {
|
||||
if toggle == nil || toggle.Enabled == nil {
|
||||
return
|
||||
}
|
||||
r.setEnabledUnchecked(id, *toggle.Enabled)
|
||||
}
|
||||
|
||||
// setEnabledUnchecked sets module state without dependency validation.
|
||||
// Used during loading when the full state is being assembled incrementally.
|
||||
func (r *Registry) setEnabledUnchecked(id string, enabled bool) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if m, ok := r.modules[id]; ok && !m.Required {
|
||||
m.enabled = enabled
|
||||
}
|
||||
}
|
||||
|
||||
// loadFromDB reads module_state rows and applies them to the registry.
|
||||
func loadFromDB(r *Registry, pool *pgxpool.Pool) error {
|
||||
rows, err := pool.Query(context.Background(),
|
||||
`SELECT module_id, enabled FROM module_state`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var id string
|
||||
var enabled bool
|
||||
if err := rows.Scan(&id, &enabled); err != nil {
|
||||
return err
|
||||
}
|
||||
r.setEnabledUnchecked(id, enabled)
|
||||
}
|
||||
return rows.Err()
|
||||
}
|
||||
88
internal/modules/loader_test.go
Normal file
88
internal/modules/loader_test.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/config"
|
||||
)
|
||||
|
||||
func boolPtr(v bool) *bool { return &v }
|
||||
|
||||
func TestLoadState_DefaultsOnly(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
cfg := &config.Config{}
|
||||
|
||||
if err := LoadState(r, cfg, nil); err != nil {
|
||||
t.Fatalf("LoadState: %v", err)
|
||||
}
|
||||
|
||||
// Auth defaults to true from registry, but cfg.Auth.Enabled is false
|
||||
// (zero value) and backward-compat applies, so auth ends up disabled.
|
||||
if r.IsEnabled(Auth) {
|
||||
t.Error("auth should be disabled (cfg.Auth.Enabled is false by default)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadState_BackwardCompat(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
cfg := &config.Config{}
|
||||
cfg.Auth.Enabled = true
|
||||
cfg.Odoo.Enabled = true
|
||||
|
||||
if err := LoadState(r, cfg, nil); err != nil {
|
||||
t.Fatalf("LoadState: %v", err)
|
||||
}
|
||||
|
||||
if !r.IsEnabled(Auth) {
|
||||
t.Error("auth should be enabled via cfg.Auth.Enabled")
|
||||
}
|
||||
if !r.IsEnabled(Odoo) {
|
||||
t.Error("odoo should be enabled via cfg.Odoo.Enabled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadState_YAMLModulesOverrideCompat(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
cfg := &config.Config{}
|
||||
cfg.Auth.Enabled = true // compat says enabled
|
||||
cfg.Modules.Auth = &config.ModuleToggle{Enabled: boolPtr(false)} // explicit says disabled
|
||||
|
||||
if err := LoadState(r, cfg, nil); err != nil {
|
||||
t.Fatalf("LoadState: %v", err)
|
||||
}
|
||||
|
||||
if r.IsEnabled(Auth) {
|
||||
t.Error("modules.auth.enabled=false should override auth.enabled=true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadState_EnableJobsAndDAG(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
cfg := &config.Config{}
|
||||
cfg.Auth.Enabled = true
|
||||
cfg.Modules.Jobs = &config.ModuleToggle{Enabled: boolPtr(true)}
|
||||
cfg.Modules.DAG = &config.ModuleToggle{Enabled: boolPtr(true)}
|
||||
|
||||
if err := LoadState(r, cfg, nil); err != nil {
|
||||
t.Fatalf("LoadState: %v", err)
|
||||
}
|
||||
|
||||
if !r.IsEnabled(Jobs) {
|
||||
t.Error("jobs should be enabled")
|
||||
}
|
||||
if !r.IsEnabled(DAG) {
|
||||
t.Error("dag should be enabled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadState_InvalidDependency(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
cfg := &config.Config{}
|
||||
// Auth disabled (default), but enable jobs which depends on auth.
|
||||
cfg.Modules.Jobs = &config.ModuleToggle{Enabled: boolPtr(true)}
|
||||
|
||||
err := LoadState(r, cfg, nil)
|
||||
if err == nil {
|
||||
t.Error("should fail: jobs enabled but auth disabled")
|
||||
}
|
||||
}
|
||||
163
internal/modules/modules.go
Normal file
163
internal/modules/modules.go
Normal file
@@ -0,0 +1,163 @@
|
||||
// Package modules provides the module registry for Silo.
|
||||
// Each module groups API endpoints, UI views, and configuration.
|
||||
// Modules can be required (always on) or optional (admin-toggleable).
|
||||
package modules
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Module IDs. These are the stable string keys used in config YAML
// (modules.*) and in the module_state database table.
const (
	Core     = "core"     // items, revisions, files, BOM (required)
	Schemas  = "schemas"  // part numbering schemas (required)
	Storage  = "storage"  // MinIO/S3 file storage (required)
	Auth     = "auth"     // authentication and RBAC
	Projects = "projects" // project management
	Audit    = "audit"    // audit logging
	Odoo     = "odoo"     // Odoo ERP integration
	FreeCAD  = "freecad"  // Create/FreeCAD client integration
	Jobs     = "jobs"     // async job queue
	DAG      = "dag"      // feature dependency DAG
)
|
||||
|
||||
// ModuleInfo describes a module's metadata. It is a plain value type; the
// runtime enabled flag lives separately in moduleState.
type ModuleInfo struct {
	ID             string   // stable module identifier (one of the ID constants)
	Name           string   // human-readable display name
	Description    string   // short summary of what the module provides
	Required       bool     // cannot be disabled
	DefaultEnabled bool     // initial state for optional modules
	DependsOn      []string // module IDs this module requires
	Version        string
}
|
||||
|
||||
// registry entries with their runtime enabled state.
|
||||
type moduleState struct {
|
||||
ModuleInfo
|
||||
enabled bool
|
||||
}
|
||||
|
||||
// Registry holds all module definitions and their enabled state.
|
||||
type Registry struct {
|
||||
mu sync.RWMutex
|
||||
modules map[string]*moduleState
|
||||
}
|
||||
|
||||
// builtinModules defines the complete set of Silo modules.
|
||||
var builtinModules = []ModuleInfo{
|
||||
{ID: Core, Name: "Core PDM", Description: "Items, revisions, files, BOM, search, import/export", Required: true, Version: "0.2"},
|
||||
{ID: Schemas, Name: "Schemas", Description: "Part numbering schema parsing and segment management", Required: true},
|
||||
{ID: Storage, Name: "Storage", Description: "MinIO/S3 file storage, presigned uploads", Required: true},
|
||||
{ID: Auth, Name: "Authentication", Description: "Local, LDAP, OIDC authentication and RBAC", DefaultEnabled: true},
|
||||
{ID: Projects, Name: "Projects", Description: "Project management and item tagging", DefaultEnabled: true},
|
||||
{ID: Audit, Name: "Audit", Description: "Audit logging, completeness scoring", DefaultEnabled: true},
|
||||
{ID: Odoo, Name: "Odoo ERP", Description: "Odoo integration (config, sync-log, push/pull)", DependsOn: []string{Auth}},
|
||||
{ID: FreeCAD, Name: "Create Integration", Description: "URI scheme, executable path, client settings", DefaultEnabled: true},
|
||||
{ID: Jobs, Name: "Job Queue", Description: "Async compute jobs, runner management", DependsOn: []string{Auth}},
|
||||
{ID: DAG, Name: "Dependency DAG", Description: "Feature DAG sync, validation states, interference detection", DependsOn: []string{Jobs}},
|
||||
}
|
||||
|
||||
// NewRegistry creates a registry with all builtin modules set to their default state.
|
||||
func NewRegistry() *Registry {
|
||||
r := &Registry{modules: make(map[string]*moduleState, len(builtinModules))}
|
||||
for _, m := range builtinModules {
|
||||
enabled := m.Required || m.DefaultEnabled
|
||||
r.modules[m.ID] = &moduleState{ModuleInfo: m, enabled: enabled}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// IsEnabled returns whether a module is currently enabled.
|
||||
func (r *Registry) IsEnabled(id string) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if m, ok := r.modules[id]; ok {
|
||||
return m.enabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetEnabled changes a module's enabled state with dependency validation.
|
||||
func (r *Registry) SetEnabled(id string, enabled bool) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
m, ok := r.modules[id]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown module %q", id)
|
||||
}
|
||||
|
||||
if m.Required {
|
||||
return fmt.Errorf("module %q is required and cannot be disabled", id)
|
||||
}
|
||||
|
||||
if enabled {
|
||||
// Check that all dependencies are enabled.
|
||||
for _, dep := range m.DependsOn {
|
||||
if dm, ok := r.modules[dep]; ok && !dm.enabled {
|
||||
return fmt.Errorf("cannot enable %q: dependency %q is disabled", id, dep)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Check that no enabled module depends on this one.
|
||||
for _, other := range r.modules {
|
||||
if !other.enabled || other.ID == id {
|
||||
continue
|
||||
}
|
||||
for _, dep := range other.DependsOn {
|
||||
if dep == id {
|
||||
return fmt.Errorf("cannot disable %q: module %q depends on it", id, other.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
m.enabled = enabled
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns info for every module, sorted by ID.
|
||||
func (r *Registry) All() []ModuleInfo {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
out := make([]ModuleInfo, 0, len(r.modules))
|
||||
for _, m := range r.modules {
|
||||
out = append(out, m.ModuleInfo)
|
||||
}
|
||||
sort.Slice(out, func(i, j int) bool { return out[i].ID < out[j].ID })
|
||||
return out
|
||||
}
|
||||
|
||||
// Get returns info for a single module, or nil if not found.
|
||||
func (r *Registry) Get(id string) *ModuleInfo {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if m, ok := r.modules[id]; ok {
|
||||
info := m.ModuleInfo
|
||||
return &info
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateDependencies checks that every enabled module's dependencies
|
||||
// are also enabled. Returns the first violation found.
|
||||
func (r *Registry) ValidateDependencies() error {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
for _, m := range r.modules {
|
||||
if !m.enabled {
|
||||
continue
|
||||
}
|
||||
for _, dep := range m.DependsOn {
|
||||
if dm, ok := r.modules[dep]; ok && !dm.enabled {
|
||||
return fmt.Errorf("module %q is enabled but its dependency %q is disabled", m.ID, dep)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
169
internal/modules/modules_test.go
Normal file
169
internal/modules/modules_test.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewRegistry_DefaultState(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
// Required modules are always enabled.
|
||||
for _, id := range []string{Core, Schemas, Storage} {
|
||||
if !r.IsEnabled(id) {
|
||||
t.Errorf("required module %q should be enabled by default", id)
|
||||
}
|
||||
}
|
||||
|
||||
// Optional modules with DefaultEnabled=true.
|
||||
for _, id := range []string{Auth, Projects, Audit, FreeCAD} {
|
||||
if !r.IsEnabled(id) {
|
||||
t.Errorf("module %q should be enabled by default", id)
|
||||
}
|
||||
}
|
||||
|
||||
// Optional modules with DefaultEnabled=false.
|
||||
for _, id := range []string{Odoo, Jobs, DAG} {
|
||||
if r.IsEnabled(id) {
|
||||
t.Errorf("module %q should be disabled by default", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetEnabled_BasicToggle(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
// Disable an optional module with no dependents.
|
||||
if err := r.SetEnabled(Projects, false); err != nil {
|
||||
t.Fatalf("disabling projects: %v", err)
|
||||
}
|
||||
if r.IsEnabled(Projects) {
|
||||
t.Error("projects should be disabled after SetEnabled(false)")
|
||||
}
|
||||
|
||||
// Re-enable it.
|
||||
if err := r.SetEnabled(Projects, true); err != nil {
|
||||
t.Fatalf("enabling projects: %v", err)
|
||||
}
|
||||
if !r.IsEnabled(Projects) {
|
||||
t.Error("projects should be enabled after SetEnabled(true)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCannotDisableRequired(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
for _, id := range []string{Core, Schemas, Storage} {
|
||||
if err := r.SetEnabled(id, false); err == nil {
|
||||
t.Errorf("disabling required module %q should return error", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDependencyChain_EnableWithoutDep(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
// Jobs depends on Auth. Auth is enabled by default, so enabling jobs works.
|
||||
if err := r.SetEnabled(Jobs, true); err != nil {
|
||||
t.Fatalf("enabling jobs (auth enabled): %v", err)
|
||||
}
|
||||
|
||||
// DAG depends on Jobs. Jobs is now enabled, so enabling dag works.
|
||||
if err := r.SetEnabled(DAG, true); err != nil {
|
||||
t.Fatalf("enabling dag (jobs enabled): %v", err)
|
||||
}
|
||||
|
||||
// Now try with deps disabled. Start fresh.
|
||||
r2 := NewRegistry()
|
||||
|
||||
// DAG depends on Jobs, which is disabled by default.
|
||||
if err := r2.SetEnabled(DAG, true); err == nil {
|
||||
t.Error("enabling dag without jobs should fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDisableDependedOn(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
// Enable the full chain: auth (already on) → jobs → dag.
|
||||
if err := r.SetEnabled(Jobs, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.SetEnabled(DAG, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Cannot disable jobs while dag depends on it.
|
||||
if err := r.SetEnabled(Jobs, false); err == nil {
|
||||
t.Error("disabling jobs while dag is enabled should fail")
|
||||
}
|
||||
|
||||
// Disable dag first, then jobs should work.
|
||||
if err := r.SetEnabled(DAG, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.SetEnabled(Jobs, false); err != nil {
|
||||
t.Fatalf("disabling jobs after dag disabled: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCannotDisableAuthWhileJobsEnabled(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
if err := r.SetEnabled(Jobs, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Auth is depended on by jobs.
|
||||
if err := r.SetEnabled(Auth, false); err == nil {
|
||||
t.Error("disabling auth while jobs is enabled should fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnknownModule(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
if r.IsEnabled("nonexistent") {
|
||||
t.Error("unknown module should not be enabled")
|
||||
}
|
||||
if err := r.SetEnabled("nonexistent", true); err == nil {
|
||||
t.Error("setting unknown module should return error")
|
||||
}
|
||||
if r.Get("nonexistent") != nil {
|
||||
t.Error("getting unknown module should return nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAll_ReturnsAllModules(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
all := r.All()
|
||||
|
||||
if len(all) != 10 {
|
||||
t.Errorf("expected 10 modules, got %d", len(all))
|
||||
}
|
||||
|
||||
// Should be sorted by ID.
|
||||
for i := 1; i < len(all); i++ {
|
||||
if all[i].ID < all[i-1].ID {
|
||||
t.Errorf("modules not sorted: %s before %s", all[i-1].ID, all[i].ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDependencies(t *testing.T) {
|
||||
r := NewRegistry()
|
||||
|
||||
// Default state should be valid.
|
||||
if err := r.ValidateDependencies(); err != nil {
|
||||
t.Fatalf("default state should be valid: %v", err)
|
||||
}
|
||||
|
||||
// Force an invalid state by directly mutating (bypassing SetEnabled).
|
||||
r.mu.Lock()
|
||||
r.modules[Jobs].enabled = true
|
||||
r.modules[Auth].enabled = false
|
||||
r.mu.Unlock()
|
||||
|
||||
if err := r.ValidateDependencies(); err == nil {
|
||||
t.Error("should detect jobs enabled without auth")
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
)
|
||||
@@ -99,8 +100,11 @@ func (g *Generator) resolveSegment(
|
||||
return g.formatSerial(seg, next), nil
|
||||
|
||||
case "date":
|
||||
// TODO: implement date formatting
|
||||
return "", fmt.Errorf("date segments not yet implemented")
|
||||
layout := seg.Value
|
||||
if layout == "" {
|
||||
layout = "20060102"
|
||||
}
|
||||
return time.Now().UTC().Format(layout), nil
|
||||
|
||||
default:
|
||||
return "", fmt.Errorf("unknown segment type: %s", seg.Type)
|
||||
@@ -174,7 +178,84 @@ func (g *Generator) Validate(partNumber string, schemaName string) error {
|
||||
return fmt.Errorf("unknown schema: %s", schemaName)
|
||||
}
|
||||
|
||||
// TODO: parse part number and validate each segment
|
||||
_ = s
|
||||
parts := strings.Split(partNumber, s.Separator)
|
||||
if len(parts) != len(s.Segments) {
|
||||
return fmt.Errorf("expected %d segments, got %d", len(s.Segments), len(parts))
|
||||
}
|
||||
|
||||
for i, seg := range s.Segments {
|
||||
val := parts[i]
|
||||
if err := g.validateSegment(&seg, val); err != nil {
|
||||
return fmt.Errorf("segment %s: %w", seg.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateSegment checks that a single segment value is valid.
|
||||
func (g *Generator) validateSegment(seg *schema.Segment, val string) error {
|
||||
switch seg.Type {
|
||||
case "constant":
|
||||
if val != seg.Value {
|
||||
return fmt.Errorf("expected %q, got %q", seg.Value, val)
|
||||
}
|
||||
|
||||
case "enum":
|
||||
if _, ok := seg.Values[val]; !ok {
|
||||
return fmt.Errorf("invalid enum value: %s", val)
|
||||
}
|
||||
|
||||
case "string":
|
||||
if seg.Length > 0 && len(val) != seg.Length {
|
||||
return fmt.Errorf("value must be exactly %d characters", seg.Length)
|
||||
}
|
||||
if seg.MinLength > 0 && len(val) < seg.MinLength {
|
||||
return fmt.Errorf("value must be at least %d characters", seg.MinLength)
|
||||
}
|
||||
if seg.MaxLength > 0 && len(val) > seg.MaxLength {
|
||||
return fmt.Errorf("value must be at most %d characters", seg.MaxLength)
|
||||
}
|
||||
if seg.Case == "upper" && val != strings.ToUpper(val) {
|
||||
return fmt.Errorf("value must be uppercase")
|
||||
}
|
||||
if seg.Case == "lower" && val != strings.ToLower(val) {
|
||||
return fmt.Errorf("value must be lowercase")
|
||||
}
|
||||
if seg.Validation.Pattern != "" {
|
||||
re := regexp.MustCompile(seg.Validation.Pattern)
|
||||
if !re.MatchString(val) {
|
||||
msg := seg.Validation.Message
|
||||
if msg == "" {
|
||||
msg = fmt.Sprintf("value does not match pattern %s", seg.Validation.Pattern)
|
||||
}
|
||||
return fmt.Errorf("%s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
case "serial":
|
||||
if seg.Length > 0 && len(val) != seg.Length {
|
||||
return fmt.Errorf("value must be exactly %d characters", seg.Length)
|
||||
}
|
||||
for _, ch := range val {
|
||||
if ch < '0' || ch > '9' {
|
||||
return fmt.Errorf("serial must be numeric")
|
||||
}
|
||||
}
|
||||
|
||||
case "date":
|
||||
layout := seg.Value
|
||||
if layout == "" {
|
||||
layout = "20060102"
|
||||
}
|
||||
expected := time.Now().UTC().Format(layout)
|
||||
if len(val) != len(expected) {
|
||||
return fmt.Errorf("date segment length mismatch: expected %d, got %d", len(expected), len(val))
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown segment type: %s", seg.Type)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,9 @@ package partnum
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kindredsystems/silo/internal/schema"
|
||||
)
|
||||
@@ -165,3 +167,199 @@ func TestGenerateConstantSegment(t *testing.T) {
|
||||
t.Errorf("got %q, want %q", pn, "KS-0001")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateDateSegmentDefault(t *testing.T) {
|
||||
s := &schema.Schema{
|
||||
Name: "date-test",
|
||||
Version: 1,
|
||||
Separator: "-",
|
||||
Segments: []schema.Segment{
|
||||
{Name: "date", Type: "date"},
|
||||
{Name: "serial", Type: "serial", Length: 3},
|
||||
},
|
||||
}
|
||||
gen := NewGenerator(map[string]*schema.Schema{"date-test": s}, &mockSeqStore{})
|
||||
|
||||
pn, err := gen.Generate(context.Background(), Input{
|
||||
SchemaName: "date-test",
|
||||
Values: map[string]string{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Generate returned error: %v", err)
|
||||
}
|
||||
|
||||
// Default format: YYYYMMDD-NNN
|
||||
want := time.Now().UTC().Format("20060102") + "-001"
|
||||
if pn != want {
|
||||
t.Errorf("got %q, want %q", pn, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateDateSegmentCustomFormat(t *testing.T) {
|
||||
s := &schema.Schema{
|
||||
Name: "date-custom",
|
||||
Version: 1,
|
||||
Separator: "-",
|
||||
Segments: []schema.Segment{
|
||||
{Name: "date", Type: "date", Value: "0601"},
|
||||
{Name: "serial", Type: "serial", Length: 4},
|
||||
},
|
||||
}
|
||||
gen := NewGenerator(map[string]*schema.Schema{"date-custom": s}, &mockSeqStore{})
|
||||
|
||||
pn, err := gen.Generate(context.Background(), Input{
|
||||
SchemaName: "date-custom",
|
||||
Values: map[string]string{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Generate returned error: %v", err)
|
||||
}
|
||||
|
||||
// Format "0601" produces YYMM
|
||||
if matched, _ := regexp.MatchString(`^\d{4}-\d{4}$`, pn); !matched {
|
||||
t.Errorf("got %q, want pattern YYMM-NNNN", pn)
|
||||
}
|
||||
|
||||
want := time.Now().UTC().Format("0601") + "-0001"
|
||||
if pn != want {
|
||||
t.Errorf("got %q, want %q", pn, want)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Validation tests ---
|
||||
|
||||
func TestValidateBasic(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("F01-0001", "test"); err != nil {
|
||||
t.Fatalf("expected valid, got error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWrongSegmentCount(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("F01-0001-EXTRA", "test"); err == nil {
|
||||
t.Fatal("expected error for wrong segment count")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateInvalidEnum(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("ZZZ-0001", "test"); err == nil {
|
||||
t.Fatal("expected error for invalid enum value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateNonNumericSerial(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("F01-ABCD", "test"); err == nil {
|
||||
t.Fatal("expected error for non-numeric serial")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSerialWrongLength(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("F01-01", "test"); err == nil {
|
||||
t.Fatal("expected error for wrong serial length")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateConstantSegment(t *testing.T) {
|
||||
s := &schema.Schema{
|
||||
Name: "const-val",
|
||||
Version: 1,
|
||||
Separator: "-",
|
||||
Segments: []schema.Segment{
|
||||
{Name: "prefix", Type: "constant", Value: "KS"},
|
||||
{Name: "serial", Type: "serial", Length: 4},
|
||||
},
|
||||
}
|
||||
gen := NewGenerator(map[string]*schema.Schema{"const-val": s}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("KS-0001", "const-val"); err != nil {
|
||||
t.Fatalf("expected valid, got error: %v", err)
|
||||
}
|
||||
if err := gen.Validate("XX-0001", "const-val"); err == nil {
|
||||
t.Fatal("expected error for wrong constant value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateUnknownSchema(t *testing.T) {
|
||||
gen := NewGenerator(map[string]*schema.Schema{}, &mockSeqStore{})
|
||||
|
||||
if err := gen.Validate("F01-0001", "nonexistent"); err == nil {
|
||||
t.Fatal("expected error for unknown schema")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDateSegment(t *testing.T) {
|
||||
s := &schema.Schema{
|
||||
Name: "date-val",
|
||||
Version: 1,
|
||||
Separator: "-",
|
||||
Segments: []schema.Segment{
|
||||
{Name: "date", Type: "date"},
|
||||
{Name: "serial", Type: "serial", Length: 3},
|
||||
},
|
||||
}
|
||||
gen := NewGenerator(map[string]*schema.Schema{"date-val": s}, &mockSeqStore{})
|
||||
|
||||
today := time.Now().UTC().Format("20060102")
|
||||
if err := gen.Validate(today+"-001", "date-val"); err != nil {
|
||||
t.Fatalf("expected valid, got error: %v", err)
|
||||
}
|
||||
if err := gen.Validate("20-001", "date-val"); err == nil {
|
||||
t.Fatal("expected error for wrong date length")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateGeneratedOutput(t *testing.T) {
|
||||
s := testSchema()
|
||||
gen := NewGenerator(map[string]*schema.Schema{"test": s}, &mockSeqStore{})
|
||||
|
||||
pn, err := gen.Generate(context.Background(), Input{
|
||||
SchemaName: "test",
|
||||
Values: map[string]string{"category": "F01"},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Generate error: %v", err)
|
||||
}
|
||||
if err := gen.Validate(pn, "test"); err != nil {
|
||||
t.Fatalf("generated part number %q failed validation: %v", pn, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateDateSegmentYearOnly(t *testing.T) {
|
||||
s := &schema.Schema{
|
||||
Name: "date-year",
|
||||
Version: 1,
|
||||
Separator: "-",
|
||||
Segments: []schema.Segment{
|
||||
{Name: "year", Type: "date", Value: "2006"},
|
||||
{Name: "serial", Type: "serial", Length: 4},
|
||||
},
|
||||
}
|
||||
gen := NewGenerator(map[string]*schema.Schema{"date-year": s}, &mockSeqStore{})
|
||||
|
||||
pn, err := gen.Generate(context.Background(), Input{
|
||||
SchemaName: "date-year",
|
||||
Values: map[string]string{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Generate returned error: %v", err)
|
||||
}
|
||||
|
||||
want := time.Now().UTC().Format("2006") + "-0001"
|
||||
if pn != want {
|
||||
t.Errorf("got %q, want %q", pn, want)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package testutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -80,6 +79,9 @@ func TruncateAll(t *testing.T, pool *pgxpool.Pool) {
|
||||
|
||||
_, err := pool.Exec(context.Background(), `
|
||||
TRUNCATE
|
||||
settings_overrides, module_state,
|
||||
job_log, jobs, job_definitions, runners,
|
||||
dag_cross_edges, dag_edges, dag_nodes,
|
||||
audit_log, sync_log, api_tokens, sessions, item_files,
|
||||
item_projects, relationships, revisions, inventory, items,
|
||||
projects, sequences_by_name, users, property_migrations
|
||||
@@ -109,6 +111,4 @@ func findProjectRoot(t *testing.T) string {
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("unreachable"))
|
||||
}
|
||||
|
||||
26
jobdefs/assembly-validate.yaml
Normal file
26
jobdefs/assembly-validate.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
# Job definition: rebuild and validate an assembly's dependency subgraph.
job:
  name: assembly-validate
  version: 1
  description: "Validate assembly by rebuilding its dependency subgraph"

# Fires automatically when a new revision is created, but only for
# items whose type is "assembly".
trigger:
  type: revision_created
  filter:
    item_type: assembly

# The job operates on the whole assembly, not a single item.
scope:
  type: assembly

compute:
  type: validate
  command: create-validate
  args:
    rebuild_mode: incremental   # NOTE(review): presumably "rebuild only changed parts" — confirm against runner
    check_interference: true

# Only runners carrying the "create" tag may pick this job up.
runner:
  tags: [create]

timeout: 900       # NOTE(review): presumably seconds — confirm against job scheduler
max_retries: 2
priority: 50
|
||||
24
jobdefs/part-export-step.yaml
Normal file
24
jobdefs/part-export-step.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Job definition: on-demand export of a single part to STEP format.
job:
  name: part-export-step
  version: 1
  description: "Export a part to STEP format"

# Manual trigger only: started explicitly (user action or API call).
trigger:
  type: manual

# Operates on a single item.
scope:
  type: item

compute:
  type: export
  command: create-export
  args:
    format: step
    # NOTE(review): {part_number} and {revision} look like per-job
    # substitution placeholders — confirm against the runner's templating.
    output_key_template: "exports/{part_number}_rev{revision}.step"

# Only runners carrying the "create" tag may pick this job up.
runner:
  tags: [create]

timeout: 300       # NOTE(review): presumably seconds — confirm against job scheduler
max_retries: 1
priority: 100
|
||||
67
migrations/014_dag_nodes_edges.sql
Normal file
67
migrations/014_dag_nodes_edges.sql
Normal file
@@ -0,0 +1,67 @@
|
||||
-- Dependency DAG: feature-level nodes and edges within items.
-- Migration: 014_dag_nodes_edges
-- Date: 2026-02

BEGIN;

--------------------------------------------------------------------------------
-- DAG Nodes (feature-level nodes within an item's revision)
-- One row per feature node; (item_id, revision_number, node_key) is unique.
-- Rows are removed automatically when the owning item is deleted (CASCADE).
--------------------------------------------------------------------------------

CREATE TABLE dag_nodes (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    item_id UUID NOT NULL REFERENCES items(id) ON DELETE CASCADE,
    revision_number INTEGER NOT NULL,
    node_key TEXT NOT NULL,
    node_type TEXT NOT NULL,
    properties_hash TEXT,
    validation_state TEXT NOT NULL DEFAULT 'clean',
    validation_msg TEXT,
    metadata JSONB DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE(item_id, revision_number, node_key)
);

CREATE INDEX idx_dag_nodes_item ON dag_nodes(item_id);
CREATE INDEX idx_dag_nodes_item_rev ON dag_nodes(item_id, revision_number);
-- Partial index: only non-clean nodes are indexed, keeping the index small
-- when most nodes are in the default 'clean' state.
CREATE INDEX idx_dag_nodes_state ON dag_nodes(validation_state)
    WHERE validation_state != 'clean';
CREATE INDEX idx_dag_nodes_type ON dag_nodes(node_type);

--------------------------------------------------------------------------------
-- DAG Edges (dependencies between nodes within a single item)
-- Direction: source → target means "target depends on source"
-- Self-edges are rejected by the no_self_edge CHECK constraint.
--------------------------------------------------------------------------------

CREATE TABLE dag_edges (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    target_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    edge_type TEXT NOT NULL DEFAULT 'depends_on',
    metadata JSONB DEFAULT '{}',
    UNIQUE(source_node_id, target_node_id, edge_type),
    CONSTRAINT no_self_edge CHECK (source_node_id != target_node_id)
);

CREATE INDEX idx_dag_edges_source ON dag_edges(source_node_id);
CREATE INDEX idx_dag_edges_target ON dag_edges(target_node_id);

--------------------------------------------------------------------------------
-- Cross-item DAG edges (linking feature nodes across BOM boundaries)
-- relationship_id is nullable and set to NULL if the BOM relationship is
-- deleted, so the cross edge itself survives a relationship removal.
--------------------------------------------------------------------------------

CREATE TABLE dag_cross_edges (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    source_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    target_node_id UUID NOT NULL REFERENCES dag_nodes(id) ON DELETE CASCADE,
    relationship_id UUID REFERENCES relationships(id) ON DELETE SET NULL,
    edge_type TEXT NOT NULL DEFAULT 'assembly_ref',
    metadata JSONB DEFAULT '{}',
    UNIQUE(source_node_id, target_node_id)
);

CREATE INDEX idx_dag_cross_source ON dag_cross_edges(source_node_id);
CREATE INDEX idx_dag_cross_target ON dag_cross_edges(target_node_id);

COMMIT;
|
||||
109
migrations/015_jobs_runners.sql
Normal file
109
migrations/015_jobs_runners.sql
Normal file
@@ -0,0 +1,109 @@
|
||||
-- Worker system: runners, job definitions, jobs, and job log.
|
||||
-- Migration: 015_jobs_runners
|
||||
-- Date: 2026-02
|
||||
|
||||
BEGIN;
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Runners (registered compute workers)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE runners (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
token_hash TEXT NOT NULL,
|
||||
token_prefix TEXT NOT NULL,
|
||||
tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
status TEXT NOT NULL DEFAULT 'offline',
|
||||
last_heartbeat TIMESTAMPTZ,
|
||||
last_job_id UUID,
|
||||
metadata JSONB DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_runners_status ON runners(status);
|
||||
CREATE INDEX idx_runners_token ON runners(token_hash);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Job Definitions (parsed from YAML, stored for reference and FK)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE job_definitions (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
version INTEGER NOT NULL DEFAULT 1,
|
||||
trigger_type TEXT NOT NULL,
|
||||
scope_type TEXT NOT NULL,
|
||||
compute_type TEXT NOT NULL,
|
||||
runner_tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
timeout_seconds INTEGER NOT NULL DEFAULT 600,
|
||||
max_retries INTEGER NOT NULL DEFAULT 1,
|
||||
priority INTEGER NOT NULL DEFAULT 100,
|
||||
definition JSONB NOT NULL,
|
||||
enabled BOOLEAN NOT NULL DEFAULT true,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_job_defs_trigger ON job_definitions(trigger_type);
|
||||
CREATE INDEX idx_job_defs_enabled ON job_definitions(enabled) WHERE enabled = true;
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Jobs (individual compute job instances)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TYPE job_status AS ENUM (
|
||||
'pending', 'claimed', 'running', 'completed', 'failed', 'cancelled'
|
||||
);
|
||||
|
||||
CREATE TABLE jobs (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
job_definition_id UUID REFERENCES job_definitions(id) ON DELETE SET NULL,
|
||||
definition_name TEXT NOT NULL,
|
||||
status job_status NOT NULL DEFAULT 'pending',
|
||||
priority INTEGER NOT NULL DEFAULT 100,
|
||||
item_id UUID REFERENCES items(id) ON DELETE CASCADE,
|
||||
project_id UUID REFERENCES projects(id) ON DELETE SET NULL,
|
||||
scope_metadata JSONB DEFAULT '{}',
|
||||
runner_id UUID REFERENCES runners(id) ON DELETE SET NULL,
|
||||
runner_tags TEXT[] NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
claimed_at TIMESTAMPTZ,
|
||||
started_at TIMESTAMPTZ,
|
||||
completed_at TIMESTAMPTZ,
|
||||
timeout_seconds INTEGER NOT NULL DEFAULT 600,
|
||||
expires_at TIMESTAMPTZ,
|
||||
progress INTEGER DEFAULT 0,
|
||||
progress_message TEXT,
|
||||
result JSONB,
|
||||
error_message TEXT,
|
||||
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||
max_retries INTEGER NOT NULL DEFAULT 1,
|
||||
created_by TEXT,
|
||||
cancelled_by TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX idx_jobs_status ON jobs(status);
|
||||
CREATE INDEX idx_jobs_pending ON jobs(status, priority, created_at)
|
||||
WHERE status = 'pending';
|
||||
CREATE INDEX idx_jobs_item ON jobs(item_id);
|
||||
CREATE INDEX idx_jobs_runner ON jobs(runner_id);
|
||||
CREATE INDEX idx_jobs_definition ON jobs(job_definition_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- Job Log (append-only progress entries)
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
CREATE TABLE job_log (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
job_id UUID NOT NULL REFERENCES jobs(id) ON DELETE CASCADE,
|
||||
timestamp TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
level TEXT NOT NULL DEFAULT 'info',
|
||||
message TEXT NOT NULL,
|
||||
metadata JSONB DEFAULT '{}'
|
||||
);
|
||||
|
||||
CREATE INDEX idx_job_log_job ON job_log(job_id, timestamp);
|
||||
|
||||
COMMIT;
|
||||
15
migrations/016_module_system.sql
Normal file
15
migrations/016_module_system.sql
Normal file
@@ -0,0 +1,15 @@
|
||||
-- 016_module_system.sql — settings overrides and module state persistence
|
||||
|
||||
CREATE TABLE IF NOT EXISTS settings_overrides (
|
||||
key TEXT PRIMARY KEY,
|
||||
value JSONB NOT NULL,
|
||||
updated_by TEXT NOT NULL,
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS module_state (
|
||||
module_id TEXT PRIMARY KEY,
|
||||
enabled BOOLEAN NOT NULL,
|
||||
updated_by TEXT NOT NULL,
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
@@ -1,18 +1,23 @@
|
||||
#!/bin/bash
|
||||
# Deploy Silo to silo.kindred.internal
|
||||
# Deploy Silo to a target host
|
||||
#
|
||||
# Usage: ./scripts/deploy.sh [host]
|
||||
# host defaults to silo.kindred.internal
|
||||
# host defaults to SILO_DEPLOY_TARGET env var, or silo.example.internal
|
||||
#
|
||||
# Prerequisites:
|
||||
# - SSH access to the target host
|
||||
# - /etc/silo/silod.env must exist on target with credentials filled in
|
||||
# - PostgreSQL reachable from target at psql.kindred.internal
|
||||
# - MinIO reachable from target at minio.kindred.internal
|
||||
# - PostgreSQL reachable from target (set SILO_DB_HOST to override)
|
||||
# - MinIO reachable from target (set SILO_MINIO_HOST to override)
|
||||
#
|
||||
# Environment variables:
|
||||
# SILO_DEPLOY_TARGET - target host (default: silo.example.internal)
|
||||
# SILO_DB_HOST - PostgreSQL host (default: psql.example.internal)
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
TARGET="${1:-silo.kindred.internal}"
|
||||
TARGET="${1:-${SILO_DEPLOY_TARGET:-silo.example.internal}}"
|
||||
DB_HOST="${SILO_DB_HOST:-psql.example.internal}"
|
||||
DEPLOY_DIR="/opt/silo"
|
||||
CONFIG_DIR="/etc/silo"
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
@@ -104,7 +109,7 @@ echo " Files installed to $DEPLOY_DIR"
|
||||
REMOTE
|
||||
|
||||
echo "[6/6] Running migrations and starting service..."
|
||||
ssh "$TARGET" bash -s <<'REMOTE'
|
||||
ssh "$TARGET" DB_HOST="$DB_HOST" bash -s <<'REMOTE'
|
||||
set -euo pipefail
|
||||
|
||||
DEPLOY_DIR="/opt/silo"
|
||||
@@ -123,14 +128,14 @@ if command -v psql &>/dev/null && [ -n "${SILO_DB_PASSWORD:-}" ]; then
|
||||
for f in "$DEPLOY_DIR/migrations/"*.sql; do
|
||||
echo " $(basename "$f")"
|
||||
PGPASSWORD="$SILO_DB_PASSWORD" psql \
|
||||
-h psql.kindred.internal -p 5432 \
|
||||
-h "$DB_HOST" -p 5432 \
|
||||
-U silo -d silo \
|
||||
-f "$f" -q 2>&1 | grep -v "already exists" || true
|
||||
done
|
||||
echo " Migrations complete."
|
||||
else
|
||||
echo " WARNING: psql not available or SILO_DB_PASSWORD not set, skipping migrations."
|
||||
echo " Run migrations manually: PGPASSWORD=... psql -h psql.kindred.internal -U silo -d silo -f /opt/silo/migrations/NNN_name.sql"
|
||||
echo " Run migrations manually: PGPASSWORD=... psql -h $DB_HOST -U silo -d silo -f /opt/silo/migrations/NNN_name.sql"
|
||||
fi
|
||||
|
||||
# Start service
|
||||
|
||||
344
scripts/setup-docker.sh
Executable file
344
scripts/setup-docker.sh
Executable file
@@ -0,0 +1,344 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Silo Docker Setup Script
|
||||
# Generates .env and config.docker.yaml for the all-in-one Docker Compose stack.
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/setup-docker.sh # interactive
|
||||
# ./scripts/setup-docker.sh --non-interactive # use defaults / env vars
|
||||
# ./scripts/setup-docker.sh --domain silo.example.com
|
||||
# ./scripts/setup-docker.sh --with-nginx
|
||||
#
|
||||
# Output:
|
||||
# deployments/.env - Docker Compose environment variables
|
||||
# deployments/config.docker.yaml - Silo server configuration
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors (disabled if not a terminal)
|
||||
if [[ -t 1 ]]; then
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
BOLD='\033[1m'
|
||||
NC='\033[0m'
|
||||
else
|
||||
RED='' GREEN='' YELLOW='' BLUE='' BOLD='' NC=''
|
||||
fi
|
||||
|
||||
log_info() { echo -e "${BLUE}[INFO]${NC} $*"; }
|
||||
log_success() { echo -e "${GREEN}[OK]${NC} $*"; }
|
||||
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
|
||||
log_error() { echo -e "${RED}[ERROR]${NC} $*" >&2; }
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Defaults
|
||||
# ---------------------------------------------------------------------------
|
||||
DOMAIN="localhost"
|
||||
NON_INTERACTIVE=false
|
||||
WITH_NGINX=false
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_DIR="${SCRIPT_DIR}/.."
|
||||
OUTPUT_DIR="${PROJECT_DIR}/deployments"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Parse arguments
|
||||
# ---------------------------------------------------------------------------
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--non-interactive) NON_INTERACTIVE=true; shift ;;
|
||||
--domain) DOMAIN="$2"; shift 2 ;;
|
||||
--with-nginx) WITH_NGINX=true; shift ;;
|
||||
--output-dir) OUTPUT_DIR="$2"; shift 2 ;;
|
||||
-h|--help)
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --non-interactive Use defaults and env vars, no prompts"
|
||||
echo " --domain DOMAIN Server hostname (default: localhost)"
|
||||
echo " --with-nginx Print instructions for the nginx profile"
|
||||
echo " --output-dir DIR Output directory (default: ./deployments)"
|
||||
echo " -h, --help Show this help"
|
||||
exit 0
|
||||
;;
|
||||
*) log_error "Unknown option: $1"; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
generate_secret() {
|
||||
local len="${1:-32}"
|
||||
openssl rand -hex "$len" 2>/dev/null \
|
||||
|| head -c "$len" /dev/urandom | od -An -tx1 | tr -d ' \n'
|
||||
}
|
||||
|
||||
prompt() {
|
||||
local var_name="$1" prompt_text="$2" default="$3"
|
||||
if [[ "$NON_INTERACTIVE" == "true" ]]; then
|
||||
eval "$var_name=\"$default\""
|
||||
return
|
||||
fi
|
||||
local input
|
||||
read -r -p "$(echo -e "${BOLD}${prompt_text}${NC} [${default}]: ")" input
|
||||
eval "$var_name=\"${input:-$default}\""
|
||||
}
|
||||
|
||||
prompt_secret() {
|
||||
local var_name="$1" prompt_text="$2" default="$3"
|
||||
if [[ "$NON_INTERACTIVE" == "true" ]]; then
|
||||
eval "$var_name=\"$default\""
|
||||
return
|
||||
fi
|
||||
local input
|
||||
read -r -p "$(echo -e "${BOLD}${prompt_text}${NC} [auto-generated]: ")" input
|
||||
eval "$var_name=\"${input:-$default}\""
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Banner
|
||||
# ---------------------------------------------------------------------------
|
||||
echo ""
|
||||
echo -e "${BOLD}Silo Docker Setup${NC}"
|
||||
echo "Generates configuration for the all-in-one Docker Compose stack."
|
||||
echo ""
|
||||
|
||||
# Check for existing files
|
||||
if [[ -f "${OUTPUT_DIR}/.env" ]]; then
|
||||
log_warn "deployments/.env already exists."
|
||||
if [[ "$NON_INTERACTIVE" == "false" ]]; then
|
||||
read -r -p "Overwrite? [y/N]: " overwrite
|
||||
if [[ "${overwrite,,}" != "y" ]]; then
|
||||
log_info "Aborted."
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Gather configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
log_info "Gathering configuration..."
|
||||
echo ""
|
||||
|
||||
# Domain / base URL
|
||||
prompt DOMAIN "Server domain" "$DOMAIN"
|
||||
|
||||
if [[ "$WITH_NGINX" == "true" ]]; then
|
||||
BASE_URL="http://${DOMAIN}"
|
||||
elif [[ "$DOMAIN" == "localhost" ]]; then
|
||||
BASE_URL="http://localhost:8080"
|
||||
else
|
||||
BASE_URL="http://${DOMAIN}:8080"
|
||||
fi
|
||||
|
||||
# PostgreSQL
|
||||
PG_PASSWORD_DEFAULT="$(generate_secret 16)"
|
||||
prompt_secret POSTGRES_PASSWORD "PostgreSQL password" "$PG_PASSWORD_DEFAULT"
|
||||
|
||||
# MinIO
|
||||
MINIO_AK_DEFAULT="$(generate_secret 10)"
|
||||
MINIO_SK_DEFAULT="$(generate_secret 16)"
|
||||
prompt_secret MINIO_ACCESS_KEY "MinIO access key" "$MINIO_AK_DEFAULT"
|
||||
prompt_secret MINIO_SECRET_KEY "MinIO secret key" "$MINIO_SK_DEFAULT"
|
||||
|
||||
# OpenLDAP
|
||||
LDAP_ADMIN_PW_DEFAULT="$(generate_secret 16)"
|
||||
prompt_secret LDAP_ADMIN_PASSWORD "LDAP admin password" "$LDAP_ADMIN_PW_DEFAULT"
|
||||
prompt LDAP_USERS "LDAP initial username" "siloadmin"
|
||||
LDAP_USER_PW_DEFAULT="$(generate_secret 12)"
|
||||
prompt_secret LDAP_PASSWORDS "LDAP initial user password" "$LDAP_USER_PW_DEFAULT"
|
||||
|
||||
# Session secret
|
||||
SESSION_SECRET="$(generate_secret 32)"
|
||||
|
||||
# Silo local admin (fallback when LDAP is unavailable)
|
||||
prompt SILO_ADMIN_USERNAME "Silo local admin username" "admin"
|
||||
ADMIN_PW_DEFAULT="$(generate_secret 12)"
|
||||
prompt_secret SILO_ADMIN_PASSWORD "Silo local admin password" "$ADMIN_PW_DEFAULT"
|
||||
|
||||
echo ""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Write .env
|
||||
# ---------------------------------------------------------------------------
|
||||
log_info "Writing ${OUTPUT_DIR}/.env ..."
|
||||
|
||||
cat > "${OUTPUT_DIR}/.env" << EOF
|
||||
# Generated by setup-docker.sh on $(date +%Y-%m-%d)
|
||||
# Used by: docker compose -f deployments/docker-compose.allinone.yaml
|
||||
|
||||
# PostgreSQL
|
||||
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
||||
|
||||
# MinIO
|
||||
MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY}
|
||||
MINIO_SECRET_KEY=${MINIO_SECRET_KEY}
|
||||
|
||||
# OpenLDAP
|
||||
LDAP_ADMIN_PASSWORD=${LDAP_ADMIN_PASSWORD}
|
||||
LDAP_USERS=${LDAP_USERS}
|
||||
LDAP_PASSWORDS=${LDAP_PASSWORDS}
|
||||
|
||||
# Silo
|
||||
SILO_SESSION_SECRET=${SESSION_SECRET}
|
||||
SILO_ADMIN_USERNAME=${SILO_ADMIN_USERNAME}
|
||||
SILO_ADMIN_PASSWORD=${SILO_ADMIN_PASSWORD}
|
||||
SILO_BASE_URL=${BASE_URL}
|
||||
|
||||
# Uncomment if using OIDC (Keycloak)
|
||||
# SILO_OIDC_CLIENT_SECRET=
|
||||
EOF
|
||||
|
||||
chmod 600 "${OUTPUT_DIR}/.env"
|
||||
log_success "deployments/.env written"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Write config.docker.yaml
|
||||
# ---------------------------------------------------------------------------
|
||||
log_info "Writing ${OUTPUT_DIR}/config.docker.yaml ..."
|
||||
|
||||
# Note: Values wrapped in ${VAR} (inside the single-quoted YAMLEOF blocks)
|
||||
# are NOT expanded by bash — they are written literally into the YAML file
|
||||
# and expanded at runtime by the Go config loader via os.ExpandEnv().
|
||||
# The base_url and cors origin use the bash variable directly since
|
||||
# SILO_SERVER_BASE_URL is not a supported direct override in the Go loader.
|
||||
{
|
||||
cat << 'YAMLEOF'
|
||||
# Silo Configuration — Docker Compose (all-in-one)
|
||||
# Generated by scripts/setup-docker.sh
|
||||
#
|
||||
# Values using ${VAR} syntax are expanded from environment variables at
|
||||
# startup. Direct env var overrides (SILO_DB_PASSWORD, etc.) take precedence
|
||||
# over YAML values — see docs/CONFIGURATION.md for the full reference.
|
||||
|
||||
server:
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
YAMLEOF
|
||||
|
||||
cat << EOF
|
||||
base_url: "${BASE_URL}"
|
||||
EOF
|
||||
|
||||
cat << 'YAMLEOF'
|
||||
|
||||
database:
|
||||
host: "postgres"
|
||||
port: 5432
|
||||
name: "silo"
|
||||
user: "silo"
|
||||
password: "${SILO_DB_PASSWORD}"
|
||||
sslmode: "disable"
|
||||
max_connections: 10
|
||||
|
||||
storage:
|
||||
endpoint: "minio:9000"
|
||||
access_key: "${SILO_MINIO_ACCESS_KEY}"
|
||||
secret_key: "${SILO_MINIO_SECRET_KEY}"
|
||||
bucket: "silo-files"
|
||||
use_ssl: false
|
||||
region: "us-east-1"
|
||||
|
||||
schemas:
|
||||
directory: "/etc/silo/schemas"
|
||||
default: "kindred-rd"
|
||||
|
||||
freecad:
|
||||
uri_scheme: "silo"
|
||||
|
||||
auth:
|
||||
enabled: true
|
||||
session_secret: "${SILO_SESSION_SECRET}"
|
||||
|
||||
# Local accounts (fallback when LDAP is unavailable)
|
||||
local:
|
||||
enabled: true
|
||||
default_admin_username: "${SILO_ADMIN_USERNAME}"
|
||||
default_admin_password: "${SILO_ADMIN_PASSWORD}"
|
||||
|
||||
# OpenLDAP (provided by the Docker Compose stack)
|
||||
ldap:
|
||||
enabled: true
|
||||
url: "ldap://openldap:1389"
|
||||
base_dn: "dc=silo,dc=local"
|
||||
user_search_dn: "ou=users,dc=silo,dc=local"
|
||||
user_attr: "cn"
|
||||
email_attr: "mail"
|
||||
display_attr: "cn"
|
||||
group_attr: "memberOf"
|
||||
role_mapping:
|
||||
admin:
|
||||
- "cn=silo-admins,ou=groups,dc=silo,dc=local"
|
||||
editor:
|
||||
- "cn=silo-users,ou=groups,dc=silo,dc=local"
|
||||
viewer:
|
||||
- "cn=silo-viewers,ou=groups,dc=silo,dc=local"
|
||||
tls_skip_verify: false
|
||||
|
||||
oidc:
|
||||
enabled: false
|
||||
|
||||
cors:
|
||||
allowed_origins:
|
||||
YAMLEOF
|
||||
|
||||
cat << EOF
|
||||
- "${BASE_URL}"
|
||||
EOF
|
||||
} > "${OUTPUT_DIR}/config.docker.yaml"
|
||||
|
||||
log_success "deployments/config.docker.yaml written"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Summary
|
||||
# ---------------------------------------------------------------------------
|
||||
echo ""
|
||||
echo -e "${BOLD}============================================${NC}"
|
||||
echo -e "${BOLD}Setup complete!${NC}"
|
||||
echo -e "${BOLD}============================================${NC}"
|
||||
echo ""
|
||||
echo "Generated files:"
|
||||
echo " deployments/.env - secrets and credentials"
|
||||
echo " deployments/config.docker.yaml - server configuration"
|
||||
echo ""
|
||||
echo -e "${BOLD}Credentials:${NC}"
|
||||
echo " PostgreSQL: silo / ${POSTGRES_PASSWORD}"
|
||||
echo " MinIO: ${MINIO_ACCESS_KEY} / ${MINIO_SECRET_KEY}"
|
||||
echo " MinIO Console: http://localhost:9001"
|
||||
echo " LDAP Admin: cn=admin,dc=silo,dc=local / ${LDAP_ADMIN_PASSWORD}"
|
||||
echo " LDAP User: ${LDAP_USERS} / ${LDAP_PASSWORDS}"
|
||||
echo " Silo Admin: ${SILO_ADMIN_USERNAME} / ${SILO_ADMIN_PASSWORD} (local fallback)"
|
||||
echo " Base URL: ${BASE_URL}"
|
||||
echo ""
|
||||
echo -e "${BOLD}Next steps:${NC}"
|
||||
echo ""
|
||||
echo " # Start the stack"
|
||||
if [[ "$WITH_NGINX" == "true" ]]; then
|
||||
echo " docker compose -f deployments/docker-compose.allinone.yaml --profile nginx up -d"
|
||||
else
|
||||
echo " docker compose -f deployments/docker-compose.allinone.yaml up -d"
|
||||
fi
|
||||
echo ""
|
||||
echo " # Check status"
|
||||
echo " docker compose -f deployments/docker-compose.allinone.yaml ps"
|
||||
echo ""
|
||||
echo " # View logs"
|
||||
echo " docker compose -f deployments/docker-compose.allinone.yaml logs -f silo"
|
||||
echo ""
|
||||
echo " # Open in browser"
|
||||
echo " ${BASE_URL}"
|
||||
echo ""
|
||||
echo " # Log in with LDAP: ${LDAP_USERS} / <password above>"
|
||||
echo " # Or local admin: ${SILO_ADMIN_USERNAME} / <password above>"
|
||||
echo ""
|
||||
if [[ "$WITH_NGINX" != "true" ]]; then
|
||||
echo " To add nginx later:"
|
||||
echo " docker compose -f deployments/docker-compose.allinone.yaml --profile nginx up -d"
|
||||
echo ""
|
||||
fi
|
||||
echo "Save these credentials somewhere safe. The passwords in deployments/.env"
|
||||
echo "are the source of truth for the running stack."
|
||||
echo ""
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Silo Host Setup Script
|
||||
# Run this once on silo.kindred.internal to prepare for deployment
|
||||
# Run this once on silo.example.internal to prepare for deployment
|
||||
#
|
||||
# Usage:
|
||||
# sudo ./setup-host.sh
|
||||
@@ -24,11 +24,13 @@ BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Configuration
|
||||
REPO_URL="${SILO_REPO_URL:-https://gitea.kindred.internal/kindred/silo-0062.git}"
|
||||
REPO_URL="${SILO_REPO_URL:-https://git.kindred-systems.com/kindred/silo.git}"
|
||||
REPO_BRANCH="${SILO_BRANCH:-main}"
|
||||
INSTALL_DIR="/opt/silo"
|
||||
CONFIG_DIR="/etc/silo"
|
||||
GO_VERSION="1.23.0"
|
||||
GO_VERSION="1.24.0"
|
||||
DB_HOST="${SILO_DB_HOST:-psql.example.internal}"
|
||||
MINIO_HOST="${SILO_MINIO_HOST:-minio.example.internal}"
|
||||
|
||||
log_info() { echo -e "${BLUE}[INFO]${NC} $*"; }
|
||||
log_success() { echo -e "${GREEN}[OK]${NC} $*"; }
|
||||
@@ -155,21 +157,28 @@ log_success "Directories created"
|
||||
ENV_FILE="${CONFIG_DIR}/silod.env"
|
||||
if [[ ! -f "${ENV_FILE}" ]]; then
|
||||
log_info "Creating environment file..."
|
||||
cat > "${ENV_FILE}" << 'EOF'
|
||||
cat > "${ENV_FILE}" << EOF
|
||||
# Silo daemon environment variables
|
||||
# Fill in the values below
|
||||
|
||||
# Database credentials (psql.kindred.internal)
|
||||
# Database credentials (${DB_HOST})
|
||||
# Database: silo, User: silo
|
||||
SILO_DB_PASSWORD=
|
||||
|
||||
# MinIO credentials (minio.kindred.internal)
|
||||
# MinIO credentials (${MINIO_HOST})
|
||||
# User: silouser
|
||||
SILO_MINIO_ACCESS_KEY=silouser
|
||||
SILO_MINIO_SECRET_KEY=
|
||||
|
||||
# Authentication
|
||||
# Session secret (required when auth is enabled)
|
||||
SILO_SESSION_SECRET=
|
||||
# Default admin account (created on first startup if both are set)
|
||||
SILO_ADMIN_USERNAME=admin
|
||||
SILO_ADMIN_PASSWORD=
|
||||
|
||||
# Optional overrides
|
||||
# SILO_SERVER_BASE_URL=http://silo.kindred.internal:8080
|
||||
# SILO_SERVER_BASE_URL=http://\$(hostname -f):8080
|
||||
EOF
|
||||
chmod 600 "${ENV_FILE}"
|
||||
chown root:silo "${ENV_FILE}"
|
||||
@@ -214,10 +223,10 @@ echo "1. Edit ${ENV_FILE} and fill in credentials:"
|
||||
echo " sudo nano ${ENV_FILE}"
|
||||
echo ""
|
||||
echo "2. Verify database connectivity:"
|
||||
echo " psql -h psql.kindred.internal -U silo -d silo -c 'SELECT 1'"
|
||||
echo " psql -h ${DB_HOST} -U silo -d silo -c 'SELECT 1'"
|
||||
echo ""
|
||||
echo "3. Verify MinIO connectivity:"
|
||||
echo " curl -I http://minio.kindred.internal:9000/minio/health/live"
|
||||
echo " curl -I http://${MINIO_HOST}:9000/minio/health/live"
|
||||
echo ""
|
||||
echo "4. Run the deployment:"
|
||||
echo " sudo ${INSTALL_DIR}/src/scripts/deploy.sh"
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
# sudo ./scripts/setup-ipa-nginx.sh
|
||||
#
|
||||
# Prerequisites:
|
||||
# - FreeIPA server at ipa.kindred.internal
|
||||
# - DNS configured for silo.kindred.internal
|
||||
# - FreeIPA server at ipa.example.internal
|
||||
# - DNS configured for the silo host (set SILO_HOSTNAME to override default)
|
||||
# - Admin credentials for IPA enrollment
|
||||
|
||||
set -euo pipefail
|
||||
@@ -21,12 +21,12 @@ BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Configuration
|
||||
IPA_SERVER="${IPA_SERVER:-ipa.kindred.internal}"
|
||||
IPA_DOMAIN="${IPA_DOMAIN:-kindred.internal}"
|
||||
IPA_SERVER="${IPA_SERVER:-ipa.example.internal}"
|
||||
IPA_DOMAIN="${IPA_DOMAIN:-example.internal}"
|
||||
IPA_REALM="${IPA_REALM:-KINDRED.INTERNAL}"
|
||||
HOSTNAME="silo.kindred.internal"
|
||||
SILO_HOSTNAME="${SILO_HOSTNAME:-silo.example.internal}"
|
||||
CERT_DIR="/etc/ssl/silo"
|
||||
SILO_PORT=8080
|
||||
SILO_PORT="${SILO_PORT:-8080}"
|
||||
|
||||
log_info() { echo -e "${BLUE}[INFO]${NC} $*"; }
|
||||
log_success() { echo -e "${GREEN}[OK]${NC} $*"; }
|
||||
@@ -77,8 +77,8 @@ log_success "Packages installed"
|
||||
#
|
||||
# Step 2: Set hostname
|
||||
#
|
||||
log_info "Setting hostname to ${HOSTNAME}..."
|
||||
hostnamectl set-hostname "${HOSTNAME}"
|
||||
log_info "Setting hostname to ${SILO_HOSTNAME}..."
|
||||
hostnamectl set-hostname "${SILO_HOSTNAME}"
|
||||
log_success "Hostname set"
|
||||
|
||||
#
|
||||
@@ -95,7 +95,7 @@ else
|
||||
--server="${IPA_SERVER}" \
|
||||
--domain="${IPA_DOMAIN}" \
|
||||
--realm="${IPA_REALM}" \
|
||||
--hostname="${HOSTNAME}" \
|
||||
--hostname="${SILO_HOSTNAME}" \
|
||||
--mkhomedir \
|
||||
--enable-dns-updates \
|
||||
--unattended \
|
||||
@@ -105,7 +105,7 @@ else
|
||||
--server="${IPA_SERVER}" \
|
||||
--domain="${IPA_DOMAIN}" \
|
||||
--realm="${IPA_REALM}" \
|
||||
--hostname="${HOSTNAME}" \
|
||||
--hostname="${SILO_HOSTNAME}" \
|
||||
--mkhomedir \
|
||||
--enable-dns-updates
|
||||
}
|
||||
@@ -135,9 +135,9 @@ else
|
||||
ipa-getcert request \
|
||||
-f "${CERT_DIR}/silo.crt" \
|
||||
-k "${CERT_DIR}/silo.key" \
|
||||
-K "HTTP/${HOSTNAME}" \
|
||||
-D "${HOSTNAME}" \
|
||||
-N "CN=${HOSTNAME}" \
|
||||
-K "HTTP/${SILO_HOSTNAME}" \
|
||||
-D "${SILO_HOSTNAME}" \
|
||||
-N "CN=${SILO_HOSTNAME}" \
|
||||
-C "systemctl reload nginx"
|
||||
|
||||
log_info "Waiting for certificate to be issued..."
|
||||
@@ -186,14 +186,14 @@ if [[ -f /etc/nginx/sites-enabled/default ]]; then
|
||||
fi
|
||||
|
||||
# Create silo nginx config
|
||||
cat > /etc/nginx/sites-available/silo << 'NGINX_EOF'
|
||||
cat > /etc/nginx/sites-available/silo << NGINX_EOF
|
||||
# Silo API Server - Nginx Reverse Proxy Configuration
|
||||
|
||||
# Redirect HTTP to HTTPS
|
||||
server {
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name silo.kindred.internal;
|
||||
server_name ${SILO_HOSTNAME};
|
||||
|
||||
# Allow certmonger/ACME challenges
|
||||
location /.well-known/ {
|
||||
@@ -201,7 +201,7 @@ server {
|
||||
}
|
||||
|
||||
location / {
|
||||
return 301 https://$server_name$request_uri;
|
||||
return 301 https://\\$server_name\\$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,11 +209,11 @@ server {
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
server_name silo.kindred.internal;
|
||||
server_name ${SILO_HOSTNAME};
|
||||
|
||||
# SSL certificates (managed by certmonger/IPA)
|
||||
ssl_certificate /etc/ssl/silo/silo.crt;
|
||||
ssl_certificate_key /etc/ssl/silo/silo.key;
|
||||
ssl_certificate ${CERT_DIR}/silo.crt;
|
||||
ssl_certificate_key ${CERT_DIR}/silo.key;
|
||||
|
||||
# SSL configuration
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
@@ -226,7 +226,7 @@ server {
|
||||
# OCSP stapling
|
||||
ssl_stapling on;
|
||||
ssl_stapling_verify on;
|
||||
ssl_trusted_certificate /etc/ssl/silo/ca.crt;
|
||||
ssl_trusted_certificate ${CERT_DIR}/ca.crt;
|
||||
|
||||
# Security headers
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
@@ -240,19 +240,19 @@ server {
|
||||
|
||||
# Proxy settings
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8080;
|
||||
proxy_pass http://127.0.0.1:${SILO_PORT};
|
||||
proxy_http_version 1.1;
|
||||
|
||||
# Headers
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header Host \\$host;
|
||||
proxy_set_header X-Real-IP \\$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \\$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \\$scheme;
|
||||
proxy_set_header X-Forwarded-Host \\$host;
|
||||
proxy_set_header X-Forwarded-Port \\$server_port;
|
||||
|
||||
# WebSocket support (for future use)
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Upgrade \\$http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
# Timeouts
|
||||
@@ -343,14 +343,14 @@ echo " getcert list"
|
||||
echo ""
|
||||
echo "2. Update silo config to use correct base URL:"
|
||||
echo " sudo nano /etc/silo/config.yaml"
|
||||
echo " # Change base_url to: https://silo.kindred.internal"
|
||||
echo " # Change base_url to: https://${SILO_HOSTNAME}"
|
||||
echo ""
|
||||
echo "3. Restart silo service:"
|
||||
echo " sudo systemctl restart silod"
|
||||
echo ""
|
||||
echo "4. Test the setup:"
|
||||
echo " curl -k https://silo.kindred.internal/health"
|
||||
echo " curl https://silo.kindred.internal/health # after trusting IPA CA"
|
||||
echo " curl -k https://${SILO_HOSTNAME}/health"
|
||||
echo " curl https://${SILO_HOSTNAME}/health # after trusting IPA CA"
|
||||
echo ""
|
||||
echo "5. Trust IPA CA on client machines:"
|
||||
echo " # The CA cert is at: ${CERT_DIR}/ca.crt"
|
||||
|
||||
10
web/package-lock.json
generated
10
web/package-lock.json
generated
@@ -8,6 +8,7 @@
|
||||
"name": "silo-web",
|
||||
"version": "0.0.0",
|
||||
"dependencies": {
|
||||
"lucide-react": "^0.564.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0",
|
||||
"react-router-dom": "^7.0.0"
|
||||
@@ -1499,6 +1500,15 @@
|
||||
"yallist": "^3.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/lucide-react": {
|
||||
"version": "0.564.0",
|
||||
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.564.0.tgz",
|
||||
"integrity": "sha512-JJ8GVTQqFwuliifD48U6+h7DXEHdkhJ/E87kksGByII3qHxtPciVb8T8woQONHBQgHVOl7rSMrrip3SeVNy7Fg==",
|
||||
"license": "ISC",
|
||||
"peerDependencies": {
|
||||
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"lucide-react": "^0.564.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0",
|
||||
"react-router-dom": "^7.0.0"
|
||||
|
||||
@@ -73,7 +73,7 @@ export function AppShell() {
|
||||
padding: "var(--d-nav-py) var(--d-nav-px)",
|
||||
borderRadius: "var(--d-nav-radius)",
|
||||
textDecoration: "none",
|
||||
transition: "all 0.2s",
|
||||
transition: "all 0.15s ease",
|
||||
})}
|
||||
>
|
||||
{link.label}
|
||||
@@ -100,7 +100,7 @@ export function AppShell() {
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.15rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -113,9 +113,9 @@ export function AppShell() {
|
||||
onClick={toggleDensity}
|
||||
title={`Switch to ${density === "comfortable" ? "compact" : "comfortable"} view`}
|
||||
style={{
|
||||
padding: "0.2rem 0.5rem",
|
||||
fontSize: "0.7rem",
|
||||
borderRadius: "0.3rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "pointer",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
background: "var(--ctp-surface0)",
|
||||
@@ -129,9 +129,9 @@ export function AppShell() {
|
||||
<button
|
||||
onClick={logout}
|
||||
style={{
|
||||
padding: "0.35rem 0.75rem",
|
||||
fontSize: "0.8rem",
|
||||
borderRadius: "0.4rem",
|
||||
padding: "0.25rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
borderRadius: "0.5rem",
|
||||
cursor: "pointer",
|
||||
border: "none",
|
||||
background: "var(--ctp-surface1)",
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useEffect, useRef } from "react";
|
||||
import { Check } from "lucide-react";
|
||||
|
||||
export interface ContextMenuItem {
|
||||
label: string;
|
||||
@@ -24,76 +25,95 @@ export function ContextMenu({ x, y, items, onClose }: ContextMenuProps) {
|
||||
if (ref.current && !ref.current.contains(e.target as Node)) onClose();
|
||||
};
|
||||
const handleKey = (e: KeyboardEvent) => {
|
||||
if (e.key === 'Escape') onClose();
|
||||
if (e.key === "Escape") onClose();
|
||||
};
|
||||
const handleScroll = () => onClose();
|
||||
|
||||
document.addEventListener('mousedown', handleClick);
|
||||
document.addEventListener('keydown', handleKey);
|
||||
window.addEventListener('scroll', handleScroll, true);
|
||||
document.addEventListener("mousedown", handleClick);
|
||||
document.addEventListener("keydown", handleKey);
|
||||
window.addEventListener("scroll", handleScroll, true);
|
||||
return () => {
|
||||
document.removeEventListener('mousedown', handleClick);
|
||||
document.removeEventListener('keydown', handleKey);
|
||||
window.removeEventListener('scroll', handleScroll, true);
|
||||
document.removeEventListener("mousedown", handleClick);
|
||||
document.removeEventListener("keydown", handleKey);
|
||||
window.removeEventListener("scroll", handleScroll, true);
|
||||
};
|
||||
}, [onClose]);
|
||||
|
||||
// Clamp position to viewport
|
||||
const style: React.CSSProperties = {
|
||||
position: 'fixed',
|
||||
position: "fixed",
|
||||
left: Math.min(x, window.innerWidth - 220),
|
||||
top: Math.min(y, window.innerHeight - items.length * 32 - 16),
|
||||
zIndex: 9999,
|
||||
backgroundColor: 'var(--ctp-surface0)',
|
||||
border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.5rem',
|
||||
padding: '0.25rem 0',
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
padding: "0.25rem 0",
|
||||
minWidth: 200,
|
||||
boxShadow: '0 4px 12px rgba(0,0,0,0.4)',
|
||||
boxShadow: "0 4px 12px rgba(0,0,0,0.4)",
|
||||
};
|
||||
|
||||
return (
|
||||
<div ref={ref} style={style}>
|
||||
{items.map((item, i) =>
|
||||
item.divider ? (
|
||||
<div key={i} style={{ borderTop: '1px solid var(--ctp-surface1)', margin: '0.25rem 0' }} />
|
||||
<div
|
||||
key={i}
|
||||
style={{
|
||||
borderTop: "1px solid var(--ctp-surface1)",
|
||||
margin: "0.25rem 0",
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<button
|
||||
key={i}
|
||||
onClick={() => {
|
||||
if (item.onToggle) item.onToggle();
|
||||
else if (item.onClick) { item.onClick(); onClose(); }
|
||||
else if (item.onClick) {
|
||||
item.onClick();
|
||||
onClose();
|
||||
}
|
||||
}}
|
||||
disabled={item.disabled}
|
||||
style={{
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: '0.5rem',
|
||||
width: '100%',
|
||||
padding: '0.35rem 0.75rem',
|
||||
background: 'none',
|
||||
border: 'none',
|
||||
color: item.disabled ? 'var(--ctp-overlay0)' : 'var(--ctp-text)',
|
||||
fontSize: '0.85rem',
|
||||
cursor: item.disabled ? 'default' : 'pointer',
|
||||
textAlign: 'left',
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
width: "100%",
|
||||
padding: "0.25rem 0.75rem",
|
||||
background: "none",
|
||||
border: "none",
|
||||
color: item.disabled ? "var(--ctp-overlay0)" : "var(--ctp-text)",
|
||||
fontSize: "var(--font-body)",
|
||||
cursor: item.disabled ? "default" : "pointer",
|
||||
textAlign: "left",
|
||||
}}
|
||||
onMouseEnter={(e) => {
|
||||
if (!item.disabled) e.currentTarget.style.backgroundColor = 'var(--ctp-surface1)';
|
||||
if (!item.disabled)
|
||||
e.currentTarget.style.backgroundColor = "var(--ctp-surface1)";
|
||||
}}
|
||||
onMouseLeave={(e) => {
|
||||
e.currentTarget.style.backgroundColor = 'transparent';
|
||||
e.currentTarget.style.backgroundColor = "transparent";
|
||||
}}
|
||||
>
|
||||
{item.checked !== undefined && (
|
||||
<span style={{
|
||||
width: 16, height: 16, display: 'inline-flex', alignItems: 'center', justifyContent: 'center',
|
||||
border: '1px solid var(--ctp-overlay0)', borderRadius: 3,
|
||||
backgroundColor: item.checked ? 'var(--ctp-mauve)' : 'transparent',
|
||||
color: item.checked ? 'var(--ctp-crust)' : 'transparent',
|
||||
fontSize: '0.7rem', fontWeight: 700, flexShrink: 0,
|
||||
}}>
|
||||
{item.checked ? '✓' : ''}
|
||||
<span
|
||||
style={{
|
||||
width: 16,
|
||||
height: 16,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "center",
|
||||
border: "1px solid var(--ctp-overlay0)",
|
||||
borderRadius: 3,
|
||||
backgroundColor: item.checked
|
||||
? "var(--ctp-mauve)"
|
||||
: "transparent",
|
||||
color: item.checked ? "var(--ctp-crust)" : "transparent",
|
||||
flexShrink: 0,
|
||||
}}
|
||||
>
|
||||
{item.checked ? <Check size={14} /> : ""}
|
||||
</span>
|
||||
)}
|
||||
{item.label}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { ReactNode } from 'react';
|
||||
import type { ReactNode } from "react";
|
||||
|
||||
interface PageFooterProps {
|
||||
stats?: ReactNode;
|
||||
@@ -8,32 +8,40 @@ interface PageFooterProps {
|
||||
onPageChange?: (page: number) => void;
|
||||
}
|
||||
|
||||
export function PageFooter({ stats, page, pageSize, itemCount, onPageChange }: PageFooterProps) {
|
||||
export function PageFooter({
|
||||
stats,
|
||||
page,
|
||||
pageSize,
|
||||
itemCount,
|
||||
onPageChange,
|
||||
}: PageFooterProps) {
|
||||
const hasPagination = page !== undefined && onPageChange !== undefined;
|
||||
|
||||
return (
|
||||
<div style={{
|
||||
position: 'fixed',
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
height: 'var(--d-footer-h)',
|
||||
backgroundColor: 'var(--ctp-surface0)',
|
||||
borderTop: '1px solid var(--ctp-surface1)',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'space-between',
|
||||
padding: '0 var(--d-footer-px)',
|
||||
fontSize: 'var(--d-footer-font)',
|
||||
color: 'var(--ctp-subtext0)',
|
||||
zIndex: 100,
|
||||
}}>
|
||||
<div style={{ display: 'flex', gap: '1.5rem', alignItems: 'center' }}>
|
||||
<div
|
||||
style={{
|
||||
position: "fixed",
|
||||
bottom: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
height: "var(--d-footer-h)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderTop: "1px solid var(--ctp-surface1)",
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "space-between",
|
||||
padding: "0 var(--d-footer-px)",
|
||||
fontSize: "var(--d-footer-font)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
zIndex: 100,
|
||||
}}
|
||||
>
|
||||
<div style={{ display: "flex", gap: "1.5rem", alignItems: "center" }}>
|
||||
{stats}
|
||||
</div>
|
||||
|
||||
{hasPagination && (
|
||||
<div style={{ display: 'flex', gap: '0.5rem', alignItems: 'center' }}>
|
||||
<div style={{ display: "flex", gap: "0.5rem", alignItems: "center" }}>
|
||||
<button
|
||||
onClick={() => onPageChange(Math.max(1, page - 1))}
|
||||
disabled={page <= 1}
|
||||
@@ -47,7 +55,11 @@ export function PageFooter({ stats, page, pageSize, itemCount, onPageChange }: P
|
||||
</span>
|
||||
<button
|
||||
onClick={() => onPageChange(page + 1)}
|
||||
disabled={pageSize !== undefined && itemCount !== undefined && itemCount < pageSize}
|
||||
disabled={
|
||||
pageSize !== undefined &&
|
||||
itemCount !== undefined &&
|
||||
itemCount < pageSize
|
||||
}
|
||||
style={pageBtnStyle}
|
||||
>
|
||||
Next
|
||||
@@ -59,11 +71,11 @@ export function PageFooter({ stats, page, pageSize, itemCount, onPageChange }: P
|
||||
}
|
||||
|
||||
const pageBtnStyle: React.CSSProperties = {
|
||||
padding: '0.15rem 0.4rem',
|
||||
fontSize: 'inherit',
|
||||
border: 'none',
|
||||
borderRadius: '0.25rem',
|
||||
backgroundColor: 'var(--ctp-surface1)',
|
||||
color: 'var(--ctp-text)',
|
||||
cursor: 'pointer',
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "inherit",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useState, useRef, useEffect, useCallback } from 'react';
|
||||
import { useState, useRef, useEffect, useCallback } from "react";
|
||||
import { X } from "lucide-react";
|
||||
|
||||
export interface TagOption {
|
||||
id: string;
|
||||
@@ -12,34 +13,45 @@ interface TagInputProps {
|
||||
searchFn: (query: string) => Promise<TagOption[]>;
|
||||
}
|
||||
|
||||
export function TagInput({ value, onChange, placeholder, searchFn }: TagInputProps) {
|
||||
const [query, setQuery] = useState('');
|
||||
export function TagInput({
|
||||
value,
|
||||
onChange,
|
||||
placeholder,
|
||||
searchFn,
|
||||
}: TagInputProps) {
|
||||
const [query, setQuery] = useState("");
|
||||
const [results, setResults] = useState<TagOption[]>([]);
|
||||
const [open, setOpen] = useState(false);
|
||||
const [highlighted, setHighlighted] = useState(0);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const debounceRef = useRef<ReturnType<typeof setTimeout> | undefined>(undefined);
|
||||
const debounceRef = useRef<ReturnType<typeof setTimeout> | undefined>(
|
||||
undefined,
|
||||
);
|
||||
|
||||
// Debounced search
|
||||
const search = useCallback(
|
||||
(q: string) => {
|
||||
if (debounceRef.current) clearTimeout(debounceRef.current);
|
||||
if (q.trim() === '') {
|
||||
if (q.trim() === "") {
|
||||
// Show all results when input is empty but focused
|
||||
debounceRef.current = setTimeout(() => {
|
||||
searchFn('').then((opts) => {
|
||||
setResults(opts.filter((o) => !value.includes(o.id)));
|
||||
setHighlighted(0);
|
||||
}).catch(() => setResults([]));
|
||||
searchFn("")
|
||||
.then((opts) => {
|
||||
setResults(opts.filter((o) => !value.includes(o.id)));
|
||||
setHighlighted(0);
|
||||
})
|
||||
.catch(() => setResults([]));
|
||||
}, 100);
|
||||
return;
|
||||
}
|
||||
debounceRef.current = setTimeout(() => {
|
||||
searchFn(q).then((opts) => {
|
||||
setResults(opts.filter((o) => !value.includes(o.id)));
|
||||
setHighlighted(0);
|
||||
}).catch(() => setResults([]));
|
||||
searchFn(q)
|
||||
.then((opts) => {
|
||||
setResults(opts.filter((o) => !value.includes(o.id)));
|
||||
setHighlighted(0);
|
||||
})
|
||||
.catch(() => setResults([]));
|
||||
}, 200);
|
||||
},
|
||||
[searchFn, value],
|
||||
@@ -53,17 +65,20 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
// Close on click outside
|
||||
useEffect(() => {
|
||||
const handler = (e: MouseEvent) => {
|
||||
if (containerRef.current && !containerRef.current.contains(e.target as Node)) {
|
||||
if (
|
||||
containerRef.current &&
|
||||
!containerRef.current.contains(e.target as Node)
|
||||
) {
|
||||
setOpen(false);
|
||||
}
|
||||
};
|
||||
document.addEventListener('mousedown', handler);
|
||||
return () => document.removeEventListener('mousedown', handler);
|
||||
document.addEventListener("mousedown", handler);
|
||||
return () => document.removeEventListener("mousedown", handler);
|
||||
}, []);
|
||||
|
||||
const select = (id: string) => {
|
||||
onChange([...value, id]);
|
||||
setQuery('');
|
||||
setQuery("");
|
||||
setOpen(false);
|
||||
inputRef.current?.focus();
|
||||
};
|
||||
@@ -73,22 +88,22 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
};
|
||||
|
||||
const handleKeyDown = (e: React.KeyboardEvent) => {
|
||||
if (e.key === 'Backspace' && query === '' && value.length > 0) {
|
||||
if (e.key === "Backspace" && query === "" && value.length > 0) {
|
||||
onChange(value.slice(0, -1));
|
||||
return;
|
||||
}
|
||||
if (e.key === 'Escape') {
|
||||
if (e.key === "Escape") {
|
||||
setOpen(false);
|
||||
return;
|
||||
}
|
||||
if (!open || results.length === 0) return;
|
||||
if (e.key === 'ArrowDown') {
|
||||
if (e.key === "ArrowDown") {
|
||||
e.preventDefault();
|
||||
setHighlighted((h) => (h + 1) % results.length);
|
||||
} else if (e.key === 'ArrowUp') {
|
||||
} else if (e.key === "ArrowUp") {
|
||||
e.preventDefault();
|
||||
setHighlighted((h) => (h - 1 + results.length) % results.length);
|
||||
} else if (e.key === 'Enter') {
|
||||
} else if (e.key === "Enter") {
|
||||
e.preventDefault();
|
||||
if (results[highlighted]) select(results[highlighted].id);
|
||||
}
|
||||
@@ -99,19 +114,19 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
for (const r of results) labelMap.current.set(r.id, r.label);
|
||||
|
||||
return (
|
||||
<div ref={containerRef} style={{ position: 'relative' }}>
|
||||
<div ref={containerRef} style={{ position: "relative" }}>
|
||||
<div
|
||||
style={{
|
||||
display: 'flex',
|
||||
flexWrap: 'wrap',
|
||||
alignItems: 'center',
|
||||
gap: '0.25rem',
|
||||
padding: '0.25rem 0.5rem',
|
||||
backgroundColor: 'var(--ctp-base)',
|
||||
border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.3rem',
|
||||
cursor: 'text',
|
||||
minHeight: '1.8rem',
|
||||
display: "flex",
|
||||
flexWrap: "wrap",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "text",
|
||||
minHeight: "1.8rem",
|
||||
}}
|
||||
onClick={() => inputRef.current?.focus()}
|
||||
>
|
||||
@@ -119,14 +134,14 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
<span
|
||||
key={id}
|
||||
style={{
|
||||
display: 'inline-flex',
|
||||
alignItems: 'center',
|
||||
gap: '0.25rem',
|
||||
padding: '0.1rem 0.5rem',
|
||||
borderRadius: '1rem',
|
||||
backgroundColor: 'rgba(203,166,247,0.15)',
|
||||
color: 'var(--ctp-mauve)',
|
||||
fontSize: '0.75rem',
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
backgroundColor: "rgba(203,166,247,0.15)",
|
||||
color: "var(--ctp-mauve)",
|
||||
fontSize: "0.75rem",
|
||||
}}
|
||||
>
|
||||
{labelMap.current.get(id) ?? id}
|
||||
@@ -137,16 +152,16 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
remove(id);
|
||||
}}
|
||||
style={{
|
||||
background: 'none',
|
||||
border: 'none',
|
||||
cursor: 'pointer',
|
||||
color: 'var(--ctp-mauve)',
|
||||
background: "none",
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-mauve)",
|
||||
padding: 0,
|
||||
fontSize: '0.8rem',
|
||||
lineHeight: 1,
|
||||
display: "inline-flex",
|
||||
}}
|
||||
>
|
||||
×
|
||||
<X size={14} />
|
||||
</button>
|
||||
</span>
|
||||
))}
|
||||
@@ -166,30 +181,30 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
placeholder={value.length === 0 ? placeholder : undefined}
|
||||
style={{
|
||||
flex: 1,
|
||||
minWidth: '4rem',
|
||||
border: 'none',
|
||||
outline: 'none',
|
||||
background: 'transparent',
|
||||
color: 'var(--ctp-text)',
|
||||
fontSize: '0.85rem',
|
||||
padding: '0.1rem 0',
|
||||
minWidth: "4rem",
|
||||
border: "none",
|
||||
outline: "none",
|
||||
background: "transparent",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--font-body)",
|
||||
padding: "0.25rem 0",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
{open && results.length > 0 && (
|
||||
<div
|
||||
style={{
|
||||
position: 'absolute',
|
||||
top: '100%',
|
||||
position: "absolute",
|
||||
top: "100%",
|
||||
left: 0,
|
||||
right: 0,
|
||||
zIndex: 10,
|
||||
marginTop: '0.2rem',
|
||||
backgroundColor: 'var(--ctp-surface0)',
|
||||
border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.3rem',
|
||||
maxHeight: '160px',
|
||||
overflowY: 'auto',
|
||||
marginTop: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
maxHeight: "160px",
|
||||
overflowY: "auto",
|
||||
}}
|
||||
>
|
||||
{results.map((opt, i) => (
|
||||
@@ -201,15 +216,15 @@ export function TagInput({ value, onChange, placeholder, searchFn }: TagInputPro
|
||||
}}
|
||||
onMouseEnter={() => setHighlighted(i)}
|
||||
style={{
|
||||
padding: '0.25rem 0.5rem',
|
||||
height: '28px',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
fontSize: '0.8rem',
|
||||
cursor: 'pointer',
|
||||
color: 'var(--ctp-text)',
|
||||
padding: "0.25rem 0.5rem",
|
||||
height: "28px",
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
fontSize: "var(--font-table)",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-text)",
|
||||
backgroundColor:
|
||||
i === highlighted ? 'var(--ctp-surface1)' : 'transparent',
|
||||
i === highlighted ? "var(--ctp-surface1)" : "transparent",
|
||||
}}
|
||||
>
|
||||
{opt.label}
|
||||
|
||||
@@ -210,7 +210,7 @@ export function AuditDetailPanel({
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
color: "var(--ctp-peach)",
|
||||
fontWeight: 600,
|
||||
fontSize: "1rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{audit.part_number}
|
||||
@@ -218,7 +218,7 @@ export function AuditDetailPanel({
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.15rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -252,7 +252,7 @@ export function AuditDetailPanel({
|
||||
height: "100%",
|
||||
width: `${Math.min(audit.score * 100, 100)}%`,
|
||||
backgroundColor: color,
|
||||
transition: "width 0.3s, background-color 0.3s",
|
||||
transition: "all 0.15s ease",
|
||||
borderRadius: "0 3px 3px 0",
|
||||
}}
|
||||
/>
|
||||
@@ -263,7 +263,7 @@ export function AuditDetailPanel({
|
||||
style={{
|
||||
padding: "0.5rem 1rem",
|
||||
color: "var(--ctp-red)",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
{error}
|
||||
@@ -274,7 +274,7 @@ export function AuditDetailPanel({
|
||||
<div
|
||||
style={{
|
||||
padding: "0.5rem 1rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
borderBottom: "1px solid var(--ctp-surface0)",
|
||||
flexShrink: 0,
|
||||
@@ -361,8 +361,8 @@ function FieldGroup({
|
||||
<div style={{ marginBottom: "0.75rem" }}>
|
||||
<div
|
||||
style={{
|
||||
padding: "0.3rem 1rem",
|
||||
fontSize: "0.7rem",
|
||||
padding: "0.25rem 1rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 600,
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
@@ -424,7 +424,7 @@ function FieldRow({
|
||||
style={{
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
padding: "0.3rem 1rem",
|
||||
padding: "0.25rem 1rem",
|
||||
borderLeft: `3px solid ${borderColor}`,
|
||||
marginLeft: "0.5rem",
|
||||
gap: "0.5rem",
|
||||
@@ -434,7 +434,7 @@ function FieldRow({
|
||||
style={{
|
||||
width: 140,
|
||||
flexShrink: 0,
|
||||
fontSize: "0.78rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
}}
|
||||
title={`Weight: ${field.weight}`}
|
||||
@@ -445,7 +445,7 @@ function FieldRow({
|
||||
style={{
|
||||
marginLeft: 4,
|
||||
color: "var(--ctp-red)",
|
||||
fontSize: "0.65rem",
|
||||
fontSize: "var(--font-xs)",
|
||||
}}
|
||||
>
|
||||
*
|
||||
@@ -456,7 +456,7 @@ function FieldRow({
|
||||
<div
|
||||
style={{
|
||||
flex: 1,
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: field.filled ? "var(--ctp-text)" : "var(--ctp-subtext0)",
|
||||
fontStyle: field.filled ? "normal" : "italic",
|
||||
}}
|
||||
@@ -477,10 +477,10 @@ function FieldRow({
|
||||
placeholder="---"
|
||||
style={{
|
||||
flex: 1,
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-text)",
|
||||
outline: "none",
|
||||
@@ -492,10 +492,10 @@ function FieldRow({
|
||||
}
|
||||
|
||||
const closeBtnStyle: React.CSSProperties = {
|
||||
padding: "0.2rem 0.5rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
|
||||
@@ -51,10 +51,10 @@ export function AuditSummaryBar({
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "center",
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 600,
|
||||
color: "var(--ctp-crust)",
|
||||
transition: "opacity 0.2s",
|
||||
transition: "all 0.15s ease",
|
||||
outline: isActive ? "2px solid var(--ctp-text)" : "none",
|
||||
outlineOffset: -2,
|
||||
}}
|
||||
@@ -70,17 +70,13 @@ export function AuditSummaryBar({
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "1.5rem",
|
||||
marginTop: "0.4rem",
|
||||
fontSize: "0.8rem",
|
||||
marginTop: "0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
}}
|
||||
>
|
||||
<span>
|
||||
{summary.total_items} items
|
||||
</span>
|
||||
<span>
|
||||
Avg score: {(summary.avg_score * 100).toFixed(1)}%
|
||||
</span>
|
||||
<span>{summary.total_items} items</span>
|
||||
<span>Avg score: {(summary.avg_score * 100).toFixed(1)}%</span>
|
||||
{summary.manufactured_without_bom > 0 && (
|
||||
<span style={{ color: "var(--ctp-red)" }}>
|
||||
{summary.manufactured_without_bom} manufactured without BOM
|
||||
|
||||
@@ -55,7 +55,7 @@ export function AuditTable({
|
||||
style={{
|
||||
width: "100%",
|
||||
borderCollapse: "collapse",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
<thead>
|
||||
@@ -85,9 +85,9 @@ export function AuditTable({
|
||||
style={{
|
||||
cursor: "pointer",
|
||||
backgroundColor: isSelected
|
||||
? "var(--ctp-surface1)"
|
||||
? "rgba(203, 166, 247, 0.08)"
|
||||
: "transparent",
|
||||
transition: "background-color 0.15s",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
onMouseEnter={(e) => {
|
||||
if (!isSelected)
|
||||
@@ -103,7 +103,7 @@ export function AuditTable({
|
||||
<span
|
||||
style={{
|
||||
display: "inline-block",
|
||||
padding: "0.15rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 600,
|
||||
@@ -154,7 +154,7 @@ const thStyle: React.CSSProperties = {
|
||||
padding: "var(--d-th-py) var(--d-th-px)",
|
||||
fontSize: "var(--d-th-font)",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
color: "var(--ctp-overlay1)",
|
||||
fontWeight: 500,
|
||||
position: "sticky",
|
||||
top: 0,
|
||||
|
||||
@@ -97,7 +97,7 @@ export function AuditToolbar({
|
||||
const selectStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-text)",
|
||||
@@ -106,7 +106,7 @@ const selectStyle: React.CSSProperties = {
|
||||
const btnStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useState, useEffect, useCallback } from "react";
|
||||
import { Plus, Download } from "lucide-react";
|
||||
import { get, post, put, del } from "../../api/client";
|
||||
import type { BOMEntry } from "../../api/types";
|
||||
|
||||
@@ -117,11 +118,11 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
width: "100%",
|
||||
};
|
||||
@@ -225,7 +226,9 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
marginBottom: "0.5rem",
|
||||
}}
|
||||
>
|
||||
<span style={{ fontSize: "0.85rem", color: "var(--ctp-subtext1)" }}>
|
||||
<span
|
||||
style={{ fontSize: "var(--font-body)", color: "var(--ctp-subtext1)" }}
|
||||
>
|
||||
{entries.length} entries
|
||||
</span>
|
||||
<span style={{ flex: 1 }} />
|
||||
@@ -233,9 +236,14 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
onClick={() => {
|
||||
window.location.href = `/api/items/${encodeURIComponent(partNumber)}/bom/export.csv`;
|
||||
}}
|
||||
style={toolBtnStyle}
|
||||
style={{
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
>
|
||||
Export CSV
|
||||
<Download size={14} /> Export CSV
|
||||
</button>
|
||||
{isEditor && (
|
||||
<button
|
||||
@@ -244,9 +252,14 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
setEditIdx(null);
|
||||
setForm(emptyForm);
|
||||
}}
|
||||
style={toolBtnStyle}
|
||||
style={{
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
>
|
||||
+ Add
|
||||
<Plus size={14} /> Add
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
@@ -254,9 +267,9 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
{isEditor && assemblyCount > 0 && (
|
||||
<div
|
||||
style={{
|
||||
padding: "0.35rem 0.6rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
marginBottom: "0.5rem",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "rgba(148,226,213,0.1)",
|
||||
border: "1px solid rgba(148,226,213,0.3)",
|
||||
fontSize: "0.75rem",
|
||||
@@ -274,7 +287,7 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
style={{
|
||||
width: "100%",
|
||||
borderCollapse: "collapse",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
<thead>
|
||||
@@ -403,12 +416,12 @@ export function BOMTab({ partNumber, isEditor }: BOMTabProps) {
|
||||
}
|
||||
|
||||
const thStyle: React.CSSProperties = {
|
||||
padding: "0.3rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
textAlign: "left",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
color: "var(--ctp-overlay1)",
|
||||
fontWeight: 600,
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
whiteSpace: "nowrap",
|
||||
@@ -422,9 +435,10 @@ const tdStyle: React.CSSProperties = {
|
||||
|
||||
const toolBtnStyle: React.CSSProperties = {
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
@@ -436,12 +450,15 @@ const actionBtnStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
padding: "0.1rem 0.3rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.25rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
const saveBtnStyle: React.CSSProperties = {
|
||||
padding: "0.2rem 0.4rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-green)",
|
||||
@@ -451,9 +468,9 @@ const saveBtnStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const sourceBadgeBase: React.CSSProperties = {
|
||||
padding: "0.1rem 0.4rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 500,
|
||||
};
|
||||
|
||||
@@ -470,8 +487,9 @@ const manualBadge: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const cancelBtnStyle: React.CSSProperties = {
|
||||
padding: "0.2rem 0.4rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
|
||||
@@ -62,7 +62,7 @@ export function CategoryPicker({
|
||||
<div
|
||||
style={{
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
overflow: "hidden",
|
||||
}}
|
||||
@@ -74,7 +74,7 @@ export function CategoryPicker({
|
||||
display: "flex",
|
||||
flexWrap: "wrap",
|
||||
gap: "0.25rem",
|
||||
padding: "0.4rem 0.5rem",
|
||||
padding: "0.5rem 0.5rem",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-mantle)",
|
||||
}}
|
||||
@@ -95,9 +95,9 @@ export function CategoryPicker({
|
||||
}
|
||||
}}
|
||||
style={{
|
||||
padding: "0.2rem 0.5rem",
|
||||
fontSize: "0.7rem",
|
||||
fontWeight: isActive ? 600 : 400,
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "pointer",
|
||||
@@ -107,7 +107,7 @@ export function CategoryPicker({
|
||||
color: isActive
|
||||
? "var(--ctp-mauve)"
|
||||
: "var(--ctp-subtext0)",
|
||||
transition: "background-color 0.1s",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
>
|
||||
<span style={{ fontFamily: "'JetBrains Mono', monospace" }}>
|
||||
@@ -133,8 +133,8 @@ export function CategoryPicker({
|
||||
disabled={isMultiStage && !selectedDomain}
|
||||
style={{
|
||||
width: "100%",
|
||||
padding: "0.4rem 0.5rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.5rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-mantle)",
|
||||
@@ -152,7 +152,7 @@ export function CategoryPicker({
|
||||
padding: "0.75rem",
|
||||
textAlign: "center",
|
||||
color: "var(--ctp-subtext0)",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
Select a domain to see categories
|
||||
@@ -163,7 +163,7 @@ export function CategoryPicker({
|
||||
padding: "0.75rem",
|
||||
textAlign: "center",
|
||||
color: "var(--ctp-subtext0)",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
No categories found
|
||||
@@ -180,15 +180,15 @@ export function CategoryPicker({
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
padding: "0.3rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: isSelected
|
||||
? "rgba(203,166,247,0.12)"
|
||||
: "transparent",
|
||||
color: isSelected ? "var(--ctp-mauve)" : "var(--ctp-text)",
|
||||
fontWeight: isSelected ? 600 : 400,
|
||||
transition: "background-color 0.1s",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
onMouseEnter={(e) => {
|
||||
if (!isSelected)
|
||||
@@ -228,7 +228,7 @@ export function CategoryPicker({
|
||||
{value && categories[value] && (
|
||||
<div
|
||||
style={{
|
||||
padding: "0.3rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "0.75rem",
|
||||
color: "var(--ctp-subtext0)",
|
||||
borderTop: "1px solid var(--ctp-surface0)",
|
||||
|
||||
@@ -263,7 +263,7 @@ export function CreateItemPane({ onCreated, onCancel }: CreateItemPaneProps) {
|
||||
style={{
|
||||
color: "var(--ctp-green)",
|
||||
fontWeight: 600,
|
||||
fontSize: "0.9rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
New Item
|
||||
@@ -382,7 +382,7 @@ export function CreateItemPane({ onCreated, onCancel }: CreateItemPaneProps) {
|
||||
onClick={handleThumbnailSelect}
|
||||
style={{
|
||||
aspectRatio: "4/3",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
border: "1px dashed var(--ctp-surface1)",
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
@@ -400,13 +400,19 @@ export function CreateItemPane({ onCreated, onCancel }: CreateItemPaneProps) {
|
||||
/>
|
||||
) : thumbnailFile?.uploadStatus === "uploading" ? (
|
||||
<span
|
||||
style={{ fontSize: "0.8rem", color: "var(--ctp-subtext0)" }}
|
||||
style={{
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
}}
|
||||
>
|
||||
Uploading... {thumbnailFile.uploadProgress}%
|
||||
</span>
|
||||
) : (
|
||||
<span
|
||||
style={{ fontSize: "0.8rem", color: "var(--ctp-subtext0)" }}
|
||||
style={{
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
}}
|
||||
>
|
||||
Click to upload
|
||||
</span>
|
||||
@@ -453,6 +459,7 @@ function renderField(
|
||||
<div key={field.name} style={{ gridColumn: "1 / -1" }}>
|
||||
<FormGroup label={field.label}>
|
||||
<textarea
|
||||
className="silo-input"
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
style={{ ...inputStyle, minHeight: 60, resize: "vertical" }}
|
||||
@@ -467,6 +474,7 @@ function renderField(
|
||||
return (
|
||||
<FormGroup key={field.name} label={field.label}>
|
||||
<select
|
||||
className="silo-input"
|
||||
value={value || (field.default != null ? String(field.default) : "")}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -486,6 +494,7 @@ function renderField(
|
||||
return (
|
||||
<FormGroup key={field.name} label={field.label}>
|
||||
<select
|
||||
className="silo-input"
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -505,6 +514,7 @@ function renderField(
|
||||
label={`${field.label}${field.currency ? ` (${field.currency})` : ""}`}
|
||||
>
|
||||
<input
|
||||
className="silo-input"
|
||||
type="number"
|
||||
step="0.01"
|
||||
value={value}
|
||||
@@ -521,6 +531,7 @@ function renderField(
|
||||
<div key={field.name} style={{ gridColumn: "1 / -1" }}>
|
||||
<FormGroup label={field.label}>
|
||||
<input
|
||||
className="silo-input"
|
||||
type="url"
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
@@ -541,6 +552,7 @@ function renderField(
|
||||
return (
|
||||
<FormGroup key={field.name} label={field.label}>
|
||||
<input
|
||||
className="silo-input"
|
||||
type={inputType}
|
||||
value={value}
|
||||
onChange={(e) => onChange(e.target.value)}
|
||||
@@ -565,7 +577,7 @@ function SectionHeader({ children }: { children: React.ReactNode }) {
|
||||
>
|
||||
<span
|
||||
style={{
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 600,
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
@@ -602,12 +614,12 @@ function SidebarSection({
|
||||
>
|
||||
<div
|
||||
style={{
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 600,
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
color: "var(--ctp-subtext0)",
|
||||
marginBottom: "0.4rem",
|
||||
marginBottom: "0.5rem",
|
||||
}}
|
||||
>
|
||||
{title}
|
||||
@@ -623,8 +635,8 @@ function MetaRow({ label, value }: { label: string; value: string }) {
|
||||
style={{
|
||||
display: "flex",
|
||||
justifyContent: "space-between",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.15rem 0",
|
||||
fontSize: "var(--font-table)",
|
||||
padding: "0.25rem 0",
|
||||
}}
|
||||
>
|
||||
<span style={{ color: "var(--ctp-subtext0)" }}>{label}</span>
|
||||
@@ -641,13 +653,13 @@ function FormGroup({
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
return (
|
||||
<div style={{ marginBottom: "0.6rem" }}>
|
||||
<div style={{ marginBottom: "0.5rem" }}>
|
||||
<label
|
||||
style={{
|
||||
display: "block",
|
||||
fontSize: "0.75rem",
|
||||
color: "var(--ctp-subtext0)",
|
||||
marginBottom: "0.2rem",
|
||||
marginBottom: "0.25rem",
|
||||
}}
|
||||
>
|
||||
{label}
|
||||
@@ -670,10 +682,11 @@ const headerStyle: React.CSSProperties = {
|
||||
};
|
||||
|
||||
const actionBtnStyle: React.CSSProperties = {
|
||||
padding: "0.3rem 0.75rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
};
|
||||
@@ -683,17 +696,19 @@ const cancelBtnStyle: React.CSSProperties = {
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
width: "100%",
|
||||
padding: "0.35rem 0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
boxSizing: "border-box",
|
||||
};
|
||||
@@ -708,7 +723,7 @@ const errorStyle: React.CSSProperties = {
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
};
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useState } from 'react';
|
||||
import { del } from '../../api/client';
|
||||
import { useState } from "react";
|
||||
import { del } from "../../api/client";
|
||||
|
||||
interface DeleteItemPaneProps {
|
||||
partNumber: string;
|
||||
@@ -7,7 +7,11 @@ interface DeleteItemPaneProps {
|
||||
onCancel: () => void;
|
||||
}
|
||||
|
||||
export function DeleteItemPane({ partNumber, onDeleted, onCancel }: DeleteItemPaneProps) {
|
||||
export function DeleteItemPane({
|
||||
partNumber,
|
||||
onDeleted,
|
||||
onCancel,
|
||||
}: DeleteItemPaneProps) {
|
||||
const [deleting, setDeleting] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
|
||||
@@ -18,59 +22,133 @@ export function DeleteItemPane({ partNumber, onDeleted, onCancel }: DeleteItemPa
|
||||
await del(`/api/items/${encodeURIComponent(partNumber)}`);
|
||||
onDeleted();
|
||||
} catch (e) {
|
||||
setError(e instanceof Error ? e.message : 'Failed to delete item');
|
||||
setError(e instanceof Error ? e.message : "Failed to delete item");
|
||||
} finally {
|
||||
setDeleting(false);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div style={{ display: 'flex', flexDirection: 'column', height: '100%' }}>
|
||||
<div style={{
|
||||
display: 'flex', alignItems: 'center', gap: '0.75rem',
|
||||
padding: '0.5rem 0.75rem',
|
||||
borderBottom: '1px solid var(--ctp-surface1)',
|
||||
backgroundColor: 'var(--ctp-mantle)',
|
||||
flexShrink: 0,
|
||||
}}>
|
||||
<span style={{ color: 'var(--ctp-red)', fontWeight: 600, fontSize: '0.9rem' }}>Delete Item</span>
|
||||
<div style={{ display: "flex", flexDirection: "column", height: "100%" }}>
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.75rem",
|
||||
padding: "0.5rem 0.75rem",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
backgroundColor: "var(--ctp-mantle)",
|
||||
flexShrink: 0,
|
||||
}}
|
||||
>
|
||||
<span
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
fontWeight: 600,
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
Delete Item
|
||||
</span>
|
||||
<span style={{ flex: 1 }} />
|
||||
<button onClick={onCancel} style={headerBtnStyle}>Cancel</button>
|
||||
<button onClick={onCancel} style={headerBtnStyle}>
|
||||
Cancel
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div style={{ flex: 1, display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center', padding: '2rem', gap: '1rem' }}>
|
||||
<div
|
||||
style={{
|
||||
flex: 1,
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
alignItems: "center",
|
||||
justifyContent: "center",
|
||||
padding: "2rem",
|
||||
gap: "1rem",
|
||||
}}
|
||||
>
|
||||
{error && (
|
||||
<div style={{ color: 'var(--ctp-red)', backgroundColor: 'rgba(243,139,168,0.1)', padding: '0.5rem 1rem', borderRadius: '0.3rem', fontSize: '0.85rem', width: '100%', textAlign: 'center' }}>
|
||||
<div
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem 1rem",
|
||||
borderRadius: "0.25rem",
|
||||
fontSize: "var(--font-body)",
|
||||
width: "100%",
|
||||
textAlign: "center",
|
||||
}}
|
||||
>
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div style={{ textAlign: 'center' }}>
|
||||
<p style={{ fontSize: '0.9rem', color: 'var(--ctp-text)', marginBottom: '0.5rem' }}>
|
||||
<div style={{ textAlign: "center" }}>
|
||||
<p
|
||||
style={{
|
||||
fontSize: "var(--font-body)",
|
||||
color: "var(--ctp-text)",
|
||||
marginBottom: "0.5rem",
|
||||
}}
|
||||
>
|
||||
Permanently delete item
|
||||
</p>
|
||||
<p style={{ fontFamily: "'JetBrains Mono', monospace", color: 'var(--ctp-peach)', fontSize: '1.1rem', fontWeight: 600 }}>
|
||||
<p
|
||||
style={{
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
color: "var(--ctp-peach)",
|
||||
fontSize: "var(--font-title)",
|
||||
fontWeight: 600,
|
||||
}}
|
||||
>
|
||||
{partNumber}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<p style={{ color: 'var(--ctp-subtext0)', fontSize: '0.85rem', textAlign: 'center', maxWidth: 300 }}>
|
||||
This will permanently remove this item, all its revisions, BOM entries, and file attachments. This action cannot be undone.
|
||||
<p
|
||||
style={{
|
||||
color: "var(--ctp-subtext0)",
|
||||
fontSize: "var(--font-body)",
|
||||
textAlign: "center",
|
||||
maxWidth: 300,
|
||||
}}
|
||||
>
|
||||
This will permanently remove this item, all its revisions, BOM
|
||||
entries, and file attachments. This action cannot be undone.
|
||||
</p>
|
||||
|
||||
<div style={{ display: 'flex', gap: '0.75rem', marginTop: '0.5rem' }}>
|
||||
<button onClick={onCancel} style={{
|
||||
padding: '0.5rem 1.25rem', fontSize: '0.85rem', border: 'none', borderRadius: '0.4rem',
|
||||
backgroundColor: 'var(--ctp-surface1)', color: 'var(--ctp-text)', cursor: 'pointer',
|
||||
}}>
|
||||
<div style={{ display: "flex", gap: "0.75rem", marginTop: "0.5rem" }}>
|
||||
<button
|
||||
onClick={onCancel}
|
||||
style={{
|
||||
padding: "0.5rem 1.25rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
Cancel
|
||||
</button>
|
||||
<button onClick={() => void handleDelete()} disabled={deleting} style={{
|
||||
padding: '0.5rem 1.25rem', fontSize: '0.85rem', border: 'none', borderRadius: '0.4rem',
|
||||
backgroundColor: 'var(--ctp-red)', color: 'var(--ctp-crust)', cursor: 'pointer',
|
||||
opacity: deleting ? 0.6 : 1,
|
||||
}}>
|
||||
{deleting ? 'Deleting...' : 'Delete Permanently'}
|
||||
<button
|
||||
onClick={() => void handleDelete()}
|
||||
disabled={deleting}
|
||||
style={{
|
||||
padding: "0.5rem 1.25rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-red)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
opacity: deleting ? 0.6 : 1,
|
||||
}}
|
||||
>
|
||||
{deleting ? "Deleting..." : "Delete Permanently"}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
@@ -79,6 +157,12 @@ export function DeleteItemPane({ partNumber, onDeleted, onCancel }: DeleteItemPa
|
||||
}
|
||||
|
||||
const headerBtnStyle: React.CSSProperties = {
|
||||
background: 'none', border: 'none', cursor: 'pointer',
|
||||
color: 'var(--ctp-subtext1)', fontSize: '0.8rem', padding: '0.2rem 0.4rem',
|
||||
background: "none",
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
@@ -79,7 +79,7 @@ export function EditItemPane({
|
||||
style={{
|
||||
color: "var(--ctp-blue)",
|
||||
fontWeight: 600,
|
||||
fontSize: "0.9rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
Edit {partNumber}
|
||||
@@ -89,10 +89,11 @@ export function EditItemPane({
|
||||
onClick={() => void handleSave()}
|
||||
disabled={saving}
|
||||
style={{
|
||||
padding: "0.3rem 0.75rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-blue)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -113,9 +114,9 @@ export function EditItemPane({
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{error}
|
||||
@@ -124,6 +125,7 @@ export function EditItemPane({
|
||||
|
||||
<FormGroup label="Part Number">
|
||||
<input
|
||||
className="silo-input"
|
||||
value={pn}
|
||||
onChange={(e) => setPN(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -132,6 +134,7 @@ export function EditItemPane({
|
||||
|
||||
<FormGroup label="Type">
|
||||
<select
|
||||
className="silo-input"
|
||||
value={itemType}
|
||||
onChange={(e) => setItemType(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -145,6 +148,7 @@ export function EditItemPane({
|
||||
|
||||
<FormGroup label="Description">
|
||||
<input
|
||||
className="silo-input"
|
||||
value={description}
|
||||
onChange={(e) => setDescription(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -153,6 +157,7 @@ export function EditItemPane({
|
||||
|
||||
<FormGroup label="Sourcing Type">
|
||||
<select
|
||||
className="silo-input"
|
||||
value={sourcingType}
|
||||
onChange={(e) => setSourcingType(e.target.value)}
|
||||
style={inputStyle}
|
||||
@@ -166,6 +171,7 @@ export function EditItemPane({
|
||||
|
||||
<FormGroup label="Long Description">
|
||||
<textarea
|
||||
className="silo-input"
|
||||
value={longDescription}
|
||||
onChange={(e) => setLongDescription(e.target.value)}
|
||||
style={{ ...inputStyle, minHeight: 80, resize: "vertical" }}
|
||||
@@ -184,13 +190,13 @@ function FormGroup({
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
return (
|
||||
<div style={{ marginBottom: "0.6rem" }}>
|
||||
<div style={{ marginBottom: "0.5rem" }}>
|
||||
<label
|
||||
style={{
|
||||
display: "block",
|
||||
fontSize: "0.75rem",
|
||||
color: "var(--ctp-subtext0)",
|
||||
marginBottom: "0.2rem",
|
||||
marginBottom: "0.25rem",
|
||||
}}
|
||||
>
|
||||
{label}
|
||||
@@ -202,11 +208,11 @@ function FormGroup({
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
width: "100%",
|
||||
padding: "0.35rem 0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
@@ -215,6 +221,8 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
@@ -72,13 +72,13 @@ export function FileDropZone({
|
||||
padding: "1.25rem",
|
||||
textAlign: "center",
|
||||
cursor: "pointer",
|
||||
backgroundColor: dragOver
|
||||
? "rgba(203,166,247,0.05)"
|
||||
: "transparent",
|
||||
transition: "border-color 0.15s, background-color 0.15s",
|
||||
backgroundColor: dragOver ? "rgba(203,166,247,0.05)" : "transparent",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
>
|
||||
<div style={{ fontSize: "0.85rem", color: "var(--ctp-subtext1)" }}>
|
||||
<div
|
||||
style={{ fontSize: "var(--font-body)", color: "var(--ctp-subtext1)" }}
|
||||
>
|
||||
Drop files here or{" "}
|
||||
<span style={{ color: "var(--ctp-mauve)", fontWeight: 600 }}>
|
||||
browse
|
||||
@@ -87,7 +87,7 @@ export function FileDropZone({
|
||||
{accept && (
|
||||
<div
|
||||
style={{
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
color: "var(--ctp-overlay0)",
|
||||
marginTop: "0.25rem",
|
||||
}}
|
||||
@@ -113,7 +113,11 @@ export function FileDropZone({
|
||||
{files.length > 0 && (
|
||||
<div style={{ marginTop: "0.5rem" }}>
|
||||
{files.map((att, i) => (
|
||||
<FileRow key={i} attachment={att} onRemove={() => onFileRemoved(i)} />
|
||||
<FileRow
|
||||
key={i}
|
||||
attachment={att}
|
||||
onRemove={() => onFileRemoved(i)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
@@ -139,8 +143,8 @@ function FileRow({
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.5rem",
|
||||
padding: "0.3rem 0.4rem",
|
||||
borderRadius: "0.3rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
position: "relative",
|
||||
}}
|
||||
>
|
||||
@@ -149,14 +153,14 @@ function FileRow({
|
||||
style={{
|
||||
width: 28,
|
||||
height: 28,
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: color,
|
||||
opacity: 0.8,
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
justifyContent: "center",
|
||||
fontSize: "0.6rem",
|
||||
fontWeight: 700,
|
||||
fontSize: "var(--font-xs)",
|
||||
fontWeight: 600,
|
||||
color: "var(--ctp-crust)",
|
||||
flexShrink: 0,
|
||||
}}
|
||||
@@ -168,7 +172,7 @@ function FileRow({
|
||||
<div style={{ flex: 1, minWidth: 0 }}>
|
||||
<div
|
||||
style={{
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-text)",
|
||||
overflow: "hidden",
|
||||
textOverflow: "ellipsis",
|
||||
@@ -177,7 +181,9 @@ function FileRow({
|
||||
>
|
||||
{attachment.file.name}
|
||||
</div>
|
||||
<div style={{ fontSize: "0.7rem", color: "var(--ctp-overlay0)" }}>
|
||||
<div
|
||||
style={{ fontSize: "var(--font-sm)", color: "var(--ctp-overlay0)" }}
|
||||
>
|
||||
{formatSize(attachment.file.size)}
|
||||
{attachment.uploadStatus === "error" && (
|
||||
<span style={{ color: "var(--ctp-red)", marginLeft: "0.5rem" }}>
|
||||
@@ -202,7 +208,7 @@ function FileRow({
|
||||
width: `${attachment.uploadProgress}%`,
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
borderRadius: 1,
|
||||
transition: "width 0.15s",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
@@ -213,7 +219,7 @@ function FileRow({
|
||||
{attachment.uploadStatus === "complete" ? (
|
||||
<span
|
||||
style={{
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
color: "var(--ctp-green)",
|
||||
flexShrink: 0,
|
||||
}}
|
||||
@@ -231,11 +237,11 @@ function FileRow({
|
||||
background: "none",
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: hovered ? "var(--ctp-red)" : "var(--ctp-overlay0)",
|
||||
padding: "0 0.2rem",
|
||||
padding: "0 0.25rem",
|
||||
flexShrink: 0,
|
||||
transition: "color 0.15s",
|
||||
transition: "all 0.15s ease",
|
||||
}}
|
||||
title="Remove"
|
||||
>
|
||||
|
||||
@@ -72,7 +72,7 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
color: "var(--ctp-yellow)",
|
||||
fontWeight: 600,
|
||||
fontSize: "0.9rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
Import Items (CSV)
|
||||
@@ -90,9 +90,9 @@ export function ImportItemsPane({
|
||||
color: "var(--ctp-red)",
|
||||
backgroundColor: "rgba(243,139,168,0.1)",
|
||||
padding: "0.5rem",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
marginBottom: "0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{error}
|
||||
@@ -102,7 +102,7 @@ export function ImportItemsPane({
|
||||
{/* Instructions */}
|
||||
<div
|
||||
style={{
|
||||
fontSize: "0.8rem",
|
||||
fontSize: "var(--font-table)",
|
||||
color: "var(--ctp-subtext0)",
|
||||
marginBottom: "0.75rem",
|
||||
}}
|
||||
@@ -120,7 +120,10 @@ export function ImportItemsPane({
|
||||
</p>
|
||||
<a
|
||||
href="/api/items/template.csv"
|
||||
style={{ color: "var(--ctp-sapphire)", fontSize: "0.8rem" }}
|
||||
style={{
|
||||
color: "var(--ctp-sapphire)",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
Download CSV template
|
||||
</a>
|
||||
@@ -149,7 +152,7 @@ export function ImportItemsPane({
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
width: "100%",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{file ? file.name : "Choose CSV file..."}
|
||||
@@ -161,8 +164,8 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.4rem",
|
||||
fontSize: "0.85rem",
|
||||
gap: "0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
marginBottom: "0.75rem",
|
||||
}}
|
||||
@@ -184,10 +187,11 @@ export function ImportItemsPane({
|
||||
onClick={() => void doImport(true)}
|
||||
disabled={!file || importing}
|
||||
style={{
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "0.85rem",
|
||||
padding: "0.5rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-yellow)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -201,10 +205,11 @@ export function ImportItemsPane({
|
||||
onClick={() => void doImport(false)}
|
||||
disabled={importing || (result?.error_count ?? 0) > 0}
|
||||
style={{
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "0.85rem",
|
||||
padding: "0.5rem 0.75rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-green)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
@@ -222,8 +227,8 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.4rem",
|
||||
fontSize: "0.8rem",
|
||||
borderRadius: "0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
<p>
|
||||
@@ -257,7 +262,7 @@ export function ImportItemsPane({
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
fontSize: "0.75rem",
|
||||
padding: "0.1rem 0",
|
||||
padding: "0.25rem 0",
|
||||
}}
|
||||
>
|
||||
Row {err.row}
|
||||
@@ -289,6 +294,8 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { X } from "lucide-react";
|
||||
import { get } from "../../api/client";
|
||||
import type { Item } from "../../api/types";
|
||||
import { MainTab } from "./MainTab";
|
||||
@@ -64,9 +65,11 @@ export function ItemDetail({
|
||||
}
|
||||
|
||||
const typeColors: Record<string, { bg: string; color: string }> = {
|
||||
part: { bg: "rgba(137,180,250,0.2)", color: "var(--ctp-blue)" },
|
||||
assembly: { bg: "rgba(166,227,161,0.2)", color: "var(--ctp-green)" },
|
||||
document: { bg: "rgba(249,226,175,0.2)", color: "var(--ctp-yellow)" },
|
||||
part: { bg: "rgba(166,227,161,0.2)", color: "var(--ctp-green)" },
|
||||
assembly: { bg: "rgba(203,166,247,0.2)", color: "var(--ctp-mauve)" },
|
||||
document: { bg: "rgba(137,180,250,0.2)", color: "var(--ctp-blue)" },
|
||||
purchased: { bg: "rgba(250,179,135,0.2)", color: "var(--ctp-peach)" },
|
||||
phantom: { bg: "rgba(127,132,156,0.2)", color: "var(--ctp-overlay1)" },
|
||||
tooling: { bg: "rgba(243,139,168,0.2)", color: "var(--ctp-red)" },
|
||||
};
|
||||
const tc = typeColors[item.item_type] ?? {
|
||||
@@ -93,16 +96,16 @@ export function ItemDetail({
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
color: "var(--ctp-peach)",
|
||||
fontWeight: 600,
|
||||
fontSize: "0.9rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{item.part_number}
|
||||
</span>
|
||||
<span
|
||||
style={{
|
||||
padding: "0.1rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.7rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
fontWeight: 500,
|
||||
backgroundColor: tc.bg,
|
||||
color: tc.color,
|
||||
@@ -129,9 +132,13 @@ export function ItemDetail({
|
||||
)}
|
||||
<button
|
||||
onClick={onClose}
|
||||
style={{ ...headerBtnStyle, fontSize: "1rem" }}
|
||||
style={{
|
||||
...headerBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
}}
|
||||
>
|
||||
×
|
||||
<X size={14} />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
@@ -150,8 +157,8 @@ export function ItemDetail({
|
||||
key={tab.key}
|
||||
onClick={() => setActiveTab(tab.key)}
|
||||
style={{
|
||||
padding: "0.4rem 0.75rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.5rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderBottom:
|
||||
activeTab === tab.key
|
||||
@@ -197,6 +204,6 @@ const headerBtnStyle: React.CSSProperties = {
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.2rem 0.4rem",
|
||||
fontSize: "var(--font-table)",
|
||||
padding: "0.25rem 0.5rem",
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useState, useCallback } from "react";
|
||||
import { ChevronUp, ChevronDown } from "lucide-react";
|
||||
import type { Item } from "../../api/types";
|
||||
import { ContextMenu, type ContextMenuItem } from "../ContextMenu";
|
||||
|
||||
@@ -49,9 +50,11 @@ interface ItemTableProps {
|
||||
}
|
||||
|
||||
const typeColors: Record<string, { bg: string; color: string }> = {
|
||||
part: { bg: "rgba(137,180,250,0.2)", color: "var(--ctp-blue)" },
|
||||
assembly: { bg: "rgba(166,227,161,0.2)", color: "var(--ctp-green)" },
|
||||
document: { bg: "rgba(249,226,175,0.2)", color: "var(--ctp-yellow)" },
|
||||
part: { bg: "rgba(166,227,161,0.2)", color: "var(--ctp-green)" },
|
||||
assembly: { bg: "rgba(203,166,247,0.2)", color: "var(--ctp-mauve)" },
|
||||
document: { bg: "rgba(137,180,250,0.2)", color: "var(--ctp-blue)" },
|
||||
purchased: { bg: "rgba(250,179,135,0.2)", color: "var(--ctp-peach)" },
|
||||
phantom: { bg: "rgba(127,132,156,0.2)", color: "var(--ctp-overlay1)" },
|
||||
tooling: { bg: "rgba(243,139,168,0.2)", color: "var(--ctp-red)" },
|
||||
};
|
||||
|
||||
@@ -148,7 +151,7 @@ export function ItemTable({
|
||||
padding: "var(--d-th-py) var(--d-th-px)",
|
||||
textAlign: "left",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
color: "var(--ctp-overlay1)",
|
||||
fontWeight: 600,
|
||||
fontSize: "var(--d-th-font)",
|
||||
textTransform: "uppercase",
|
||||
@@ -189,8 +192,18 @@ export function ItemTable({
|
||||
>
|
||||
{col.label}
|
||||
{sortKey === col.key && (
|
||||
<span style={{ marginLeft: 4 }}>
|
||||
{sortDir === "asc" ? "▲" : "▼"}
|
||||
<span
|
||||
style={{
|
||||
marginLeft: 4,
|
||||
display: "inline-flex",
|
||||
verticalAlign: "middle",
|
||||
}}
|
||||
>
|
||||
{sortDir === "asc" ? (
|
||||
<ChevronUp size={14} />
|
||||
) : (
|
||||
<ChevronDown size={14} />
|
||||
)}
|
||||
</span>
|
||||
)}
|
||||
</th>
|
||||
@@ -201,7 +214,7 @@ export function ItemTable({
|
||||
{sortedItems.map((item, idx) => {
|
||||
const isSelected = item.part_number === selectedPN;
|
||||
const rowBg = isSelected
|
||||
? "var(--ctp-surface1)"
|
||||
? "rgba(203, 166, 247, 0.08)"
|
||||
: idx % 2 === 0
|
||||
? "var(--ctp-base)"
|
||||
: "var(--ctp-surface0)";
|
||||
@@ -255,7 +268,7 @@ export function ItemTable({
|
||||
<td key={col.key} style={tdStyle}>
|
||||
<span
|
||||
style={{
|
||||
padding: "0.1rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
@@ -383,7 +396,8 @@ const actionBtnStyle: React.CSSProperties = {
|
||||
border: "none",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.15rem 0.4rem",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "0.25rem",
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useEffect, useState } from "react";
|
||||
import { Columns2, Rows2, Plus, Download, Upload } from "lucide-react";
|
||||
import { get } from "../../api/client";
|
||||
import type { Project } from "../../api/types";
|
||||
import type { ItemFilters } from "../../hooks/useItems";
|
||||
@@ -37,9 +38,10 @@ export function ItemsToolbar({
|
||||
onClick={() => onFilterChange({ searchScope: scope })}
|
||||
style={{
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
border: "none",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "pointer",
|
||||
backgroundColor:
|
||||
filters.searchScope === scope
|
||||
@@ -79,7 +81,7 @@ export function ItemsToolbar({
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
}}
|
||||
@@ -126,20 +128,42 @@ export function ItemsToolbar({
|
||||
onLayoutChange(layout === "horizontal" ? "vertical" : "horizontal")
|
||||
}
|
||||
title={`Switch to ${layout === "horizontal" ? "vertical" : "horizontal"} layout`}
|
||||
style={toolBtnStyle}
|
||||
style={{
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
}}
|
||||
>
|
||||
{layout === "horizontal" ? "⬌" : "⬍"}
|
||||
{layout === "horizontal" ? <Columns2 size={14} /> : <Rows2 size={14} />}
|
||||
</button>
|
||||
|
||||
{/* Export */}
|
||||
<button onClick={onExport} style={toolBtnStyle} title="Export CSV">
|
||||
Export
|
||||
<button
|
||||
onClick={onExport}
|
||||
style={{
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
title="Export CSV"
|
||||
>
|
||||
<Download size={14} /> Export
|
||||
</button>
|
||||
|
||||
{/* Import (editor only) */}
|
||||
{isEditor && (
|
||||
<button onClick={onImport} style={toolBtnStyle} title="Import CSV">
|
||||
Import
|
||||
<button
|
||||
onClick={onImport}
|
||||
style={{
|
||||
...toolBtnStyle,
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
title="Import CSV"
|
||||
>
|
||||
<Upload size={14} /> Import
|
||||
</button>
|
||||
)}
|
||||
|
||||
@@ -151,9 +175,12 @@ export function ItemsToolbar({
|
||||
...toolBtnStyle,
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
>
|
||||
+ New
|
||||
<Plus size={14} /> New
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
@@ -164,7 +191,7 @@ const selectStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
};
|
||||
@@ -173,8 +200,9 @@ const toolBtnStyle: React.CSSProperties = {
|
||||
padding: "var(--d-input-py) var(--d-input-px)",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
border: "none",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
fontSize: "var(--d-input-font)",
|
||||
fontSize: "0.75rem",
|
||||
fontWeight: 500,
|
||||
cursor: "pointer",
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { useState, useEffect } from "react";
|
||||
import { X } from "lucide-react";
|
||||
import { get, post, del } from "../../api/client";
|
||||
import type { Item, Project, Revision } from "../../api/types";
|
||||
|
||||
@@ -83,8 +84,8 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "1rem",
|
||||
padding: "0.3rem 0",
|
||||
fontSize: "0.85rem",
|
||||
padding: "0.25rem 0",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
<span style={{ width: 120, flexShrink: 0, color: "var(--ctp-subtext0)" }}>
|
||||
@@ -133,8 +134,8 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
marginTop: "0.75rem",
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.4rem",
|
||||
fontSize: "0.85rem",
|
||||
borderRadius: "0.5rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
<div
|
||||
@@ -176,7 +177,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
padding: "0.1rem 0.5rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderRadius: "1rem",
|
||||
backgroundColor: "rgba(203,166,247,0.15)",
|
||||
color: "var(--ctp-mauve)",
|
||||
@@ -192,11 +193,11 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
border: "none",
|
||||
color: "var(--ctp-overlay0)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.8rem",
|
||||
padding: 0,
|
||||
display: "inline-flex",
|
||||
}}
|
||||
>
|
||||
×
|
||||
<X size={14} />
|
||||
</button>
|
||||
)}
|
||||
</span>
|
||||
@@ -207,11 +208,11 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
value={addProject}
|
||||
onChange={(e) => setAddProject(e.target.value)}
|
||||
style={{
|
||||
padding: "0.1rem 0.3rem",
|
||||
padding: "0.25rem 0.25rem",
|
||||
fontSize: "0.75rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
}}
|
||||
>
|
||||
@@ -228,12 +229,12 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
<button
|
||||
onClick={() => void handleAddProject()}
|
||||
style={{
|
||||
padding: "0.1rem 0.4rem",
|
||||
fontSize: "0.7rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-sm)",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
@@ -252,7 +253,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
marginTop: "0.75rem",
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.4rem",
|
||||
borderRadius: "0.5rem",
|
||||
}}
|
||||
>
|
||||
<div
|
||||
@@ -269,7 +270,7 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
display: "flex",
|
||||
alignItems: "center",
|
||||
gap: "0.75rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
{latestRev.file_size != null && (
|
||||
@@ -292,12 +293,12 @@ export function MainTab({ item, onReload, isEditor }: MainTabProps) {
|
||||
window.location.href = `/api/items/${encodeURIComponent(item.part_number)}/file/${latestRev.revision_number}`;
|
||||
}}
|
||||
style={{
|
||||
padding: "0.2rem 0.5rem",
|
||||
fontSize: "0.8rem",
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-text)",
|
||||
borderRadius: "0.3rem",
|
||||
borderRadius: "0.25rem",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { useState } from 'react';
|
||||
import { post } from '../../api/client';
|
||||
import type { Item } from '../../api/types';
|
||||
import { useState } from "react";
|
||||
import { X, Plus } from "lucide-react";
|
||||
import { post } from "../../api/client";
|
||||
import type { Item } from "../../api/types";
|
||||
|
||||
interface PropertiesTabProps {
|
||||
item: Item;
|
||||
@@ -8,24 +9,24 @@ interface PropertiesTabProps {
|
||||
isEditor: boolean;
|
||||
}
|
||||
|
||||
type Mode = 'form' | 'json';
|
||||
type Mode = "form" | "json";
|
||||
|
||||
interface PropRow {
|
||||
key: string;
|
||||
value: string;
|
||||
type: 'string' | 'number' | 'boolean';
|
||||
type: "string" | "number" | "boolean";
|
||||
}
|
||||
|
||||
function detectType(v: unknown): PropRow['type'] {
|
||||
if (typeof v === 'number') return 'number';
|
||||
if (typeof v === 'boolean') return 'boolean';
|
||||
return 'string';
|
||||
function detectType(v: unknown): PropRow["type"] {
|
||||
if (typeof v === "number") return "number";
|
||||
if (typeof v === "boolean") return "boolean";
|
||||
return "string";
|
||||
}
|
||||
|
||||
function toRows(props: Record<string, unknown>): PropRow[] {
|
||||
return Object.entries(props).map(([key, value]) => ({
|
||||
key,
|
||||
value: String(value ?? ''),
|
||||
value: String(value ?? ""),
|
||||
type: detectType(value),
|
||||
}));
|
||||
}
|
||||
@@ -35,17 +36,26 @@ function fromRows(rows: PropRow[]): Record<string, unknown> {
|
||||
for (const row of rows) {
|
||||
if (!row.key.trim()) continue;
|
||||
switch (row.type) {
|
||||
case 'number': obj[row.key] = Number(row.value) || 0; break;
|
||||
case 'boolean': obj[row.key] = row.value === 'true'; break;
|
||||
default: obj[row.key] = row.value;
|
||||
case "number":
|
||||
obj[row.key] = Number(row.value) || 0;
|
||||
break;
|
||||
case "boolean":
|
||||
obj[row.key] = row.value === "true";
|
||||
break;
|
||||
default:
|
||||
obj[row.key] = row.value;
|
||||
}
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
export function PropertiesTab({ item, onReload, isEditor }: PropertiesTabProps) {
|
||||
export function PropertiesTab({
|
||||
item,
|
||||
onReload,
|
||||
isEditor,
|
||||
}: PropertiesTabProps) {
|
||||
const props = item.properties ?? {};
|
||||
const [mode, setMode] = useState<Mode>('form');
|
||||
const [mode, setMode] = useState<Mode>("form");
|
||||
const [rows, setRows] = useState<PropRow[]>(toRows(props));
|
||||
const [jsonText, setJsonText] = useState(JSON.stringify(props, null, 2));
|
||||
const [jsonError, setJsonError] = useState<string | null>(null);
|
||||
@@ -62,18 +72,20 @@ export function PropertiesTab({ item, onReload, isEditor }: PropertiesTabProps)
|
||||
setRows(toRows(parsed));
|
||||
setJsonError(null);
|
||||
} catch (e) {
|
||||
setJsonError(e instanceof Error ? e.message : 'Invalid JSON');
|
||||
setJsonError(e instanceof Error ? e.message : "Invalid JSON");
|
||||
}
|
||||
};
|
||||
|
||||
const switchMode = (m: Mode) => {
|
||||
if (m === 'json') syncFormToJson();
|
||||
if (m === "json") syncFormToJson();
|
||||
else syncJsonToForm();
|
||||
setMode(m);
|
||||
};
|
||||
|
||||
const updateRow = (idx: number, field: keyof PropRow, value: string) => {
|
||||
setRows((prev) => prev.map((r, i) => i === idx ? { ...r, [field]: value } : r));
|
||||
setRows((prev) =>
|
||||
prev.map((r, i) => (i === idx ? { ...r, [field]: value } : r)),
|
||||
);
|
||||
};
|
||||
|
||||
const removeRow = (idx: number) => {
|
||||
@@ -81,72 +93,112 @@ export function PropertiesTab({ item, onReload, isEditor }: PropertiesTabProps)
|
||||
};
|
||||
|
||||
const addRow = () => {
|
||||
setRows((prev) => [...prev, { key: '', value: '', type: 'string' }]);
|
||||
setRows((prev) => [...prev, { key: "", value: "", type: "string" }]);
|
||||
};
|
||||
|
||||
const handleSave = async () => {
|
||||
let properties: Record<string, unknown>;
|
||||
if (mode === 'json') {
|
||||
if (mode === "json") {
|
||||
try {
|
||||
properties = JSON.parse(jsonText) as Record<string, unknown>;
|
||||
} catch {
|
||||
setJsonError('Invalid JSON');
|
||||
setJsonError("Invalid JSON");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
properties = fromRows(rows);
|
||||
}
|
||||
|
||||
const comment = prompt('Revision comment (optional):') ?? '';
|
||||
const comment = prompt("Revision comment (optional):") ?? "";
|
||||
setSaving(true);
|
||||
try {
|
||||
await post(`/api/items/${encodeURIComponent(item.part_number)}/revisions`, { properties, comment });
|
||||
await post(
|
||||
`/api/items/${encodeURIComponent(item.part_number)}/revisions`,
|
||||
{ properties, comment },
|
||||
);
|
||||
onReload();
|
||||
} catch (e) {
|
||||
alert(e instanceof Error ? e.message : 'Failed to save properties');
|
||||
alert(e instanceof Error ? e.message : "Failed to save properties");
|
||||
} finally {
|
||||
setSaving(false);
|
||||
}
|
||||
};
|
||||
|
||||
const inputStyle: React.CSSProperties = {
|
||||
padding: '0.25rem 0.4rem', fontSize: '0.8rem',
|
||||
backgroundColor: 'var(--ctp-base)', border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.3rem', color: 'var(--ctp-text)',
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* Mode toggle */}
|
||||
<div style={{ display: 'flex', gap: '0.5rem', marginBottom: '0.5rem', alignItems: 'center' }}>
|
||||
<button onClick={() => switchMode('form')} style={mode === 'form' ? activeTabBtn : tabBtn}>Form</button>
|
||||
<button onClick={() => switchMode('json')} style={mode === 'json' ? activeTabBtn : tabBtn}>JSON</button>
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "0.5rem",
|
||||
marginBottom: "0.5rem",
|
||||
alignItems: "center",
|
||||
}}
|
||||
>
|
||||
<button
|
||||
onClick={() => switchMode("form")}
|
||||
style={mode === "form" ? activeTabBtn : tabBtn}
|
||||
>
|
||||
Form
|
||||
</button>
|
||||
<button
|
||||
onClick={() => switchMode("json")}
|
||||
style={mode === "json" ? activeTabBtn : tabBtn}
|
||||
>
|
||||
JSON
|
||||
</button>
|
||||
<span style={{ flex: 1 }} />
|
||||
{isEditor && (
|
||||
<button onClick={() => void handleSave()} disabled={saving} style={{
|
||||
padding: '0.3rem 0.75rem', fontSize: '0.8rem', border: 'none', borderRadius: '0.3rem',
|
||||
backgroundColor: 'var(--ctp-mauve)', color: 'var(--ctp-crust)', cursor: 'pointer',
|
||||
opacity: saving ? 0.6 : 1,
|
||||
}}>
|
||||
{saving ? 'Saving...' : 'Save (New Revision)'}
|
||||
<button
|
||||
onClick={() => void handleSave()}
|
||||
disabled={saving}
|
||||
style={{
|
||||
padding: "0.25rem 0.75rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
opacity: saving ? 0.6 : 1,
|
||||
}}
|
||||
>
|
||||
{saving ? "Saving..." : "Save (New Revision)"}
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{mode === 'form' ? (
|
||||
{mode === "form" ? (
|
||||
<div>
|
||||
{rows.map((row, idx) => (
|
||||
<div key={idx} style={{ display: 'flex', gap: '0.3rem', marginBottom: '0.25rem', alignItems: 'center' }}>
|
||||
<div
|
||||
key={idx}
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "0.25rem",
|
||||
marginBottom: "0.25rem",
|
||||
alignItems: "center",
|
||||
}}
|
||||
>
|
||||
<input
|
||||
value={row.key}
|
||||
onChange={(e) => updateRow(idx, 'key', e.target.value)}
|
||||
onChange={(e) => updateRow(idx, "key", e.target.value)}
|
||||
placeholder="Key"
|
||||
style={{ ...inputStyle, width: 140 }}
|
||||
disabled={!isEditor}
|
||||
/>
|
||||
<select
|
||||
value={row.type}
|
||||
onChange={(e) => updateRow(idx, 'type', e.target.value)}
|
||||
onChange={(e) => updateRow(idx, "type", e.target.value)}
|
||||
style={{ ...inputStyle, width: 80 }}
|
||||
disabled={!isEditor}
|
||||
>
|
||||
@@ -154,44 +206,90 @@ export function PropertiesTab({ item, onReload, isEditor }: PropertiesTabProps)
|
||||
<option value="number">num</option>
|
||||
<option value="boolean">bool</option>
|
||||
</select>
|
||||
{row.type === 'boolean' ? (
|
||||
<select value={row.value} onChange={(e) => updateRow(idx, 'value', e.target.value)} style={{ ...inputStyle, flex: 1 }} disabled={!isEditor}>
|
||||
{row.type === "boolean" ? (
|
||||
<select
|
||||
value={row.value}
|
||||
onChange={(e) => updateRow(idx, "value", e.target.value)}
|
||||
style={{ ...inputStyle, flex: 1 }}
|
||||
disabled={!isEditor}
|
||||
>
|
||||
<option value="true">true</option>
|
||||
<option value="false">false</option>
|
||||
</select>
|
||||
) : (
|
||||
<input
|
||||
type={row.type === 'number' ? 'number' : 'text'}
|
||||
type={row.type === "number" ? "number" : "text"}
|
||||
value={row.value}
|
||||
onChange={(e) => updateRow(idx, 'value', e.target.value)}
|
||||
onChange={(e) => updateRow(idx, "value", e.target.value)}
|
||||
placeholder="Value"
|
||||
style={{ ...inputStyle, flex: 1 }}
|
||||
disabled={!isEditor}
|
||||
/>
|
||||
)}
|
||||
{isEditor && (
|
||||
<button onClick={() => removeRow(idx)} style={{ background: 'none', border: 'none', color: 'var(--ctp-red)', cursor: 'pointer', fontSize: '0.9rem' }}>×</button>
|
||||
<button
|
||||
onClick={() => removeRow(idx)}
|
||||
style={{
|
||||
background: "none",
|
||||
border: "none",
|
||||
color: "var(--ctp-red)",
|
||||
cursor: "pointer",
|
||||
display: "inline-flex",
|
||||
}}
|
||||
>
|
||||
<X size={14} />
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
))}
|
||||
{isEditor && (
|
||||
<button onClick={addRow} style={{ ...tabBtn, marginTop: '0.25rem' }}>+ Add Property</button>
|
||||
<button
|
||||
onClick={addRow}
|
||||
style={{
|
||||
...tabBtn,
|
||||
marginTop: "0.25rem",
|
||||
display: "inline-flex",
|
||||
alignItems: "center",
|
||||
gap: "0.25rem",
|
||||
}}
|
||||
>
|
||||
<Plus size={14} /> Add Property
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
) : (
|
||||
<div>
|
||||
<textarea
|
||||
value={jsonText}
|
||||
onChange={(e) => { setJsonText(e.target.value); setJsonError(null); }}
|
||||
onChange={(e) => {
|
||||
setJsonText(e.target.value);
|
||||
setJsonError(null);
|
||||
}}
|
||||
disabled={!isEditor}
|
||||
style={{
|
||||
width: '100%', minHeight: 200, padding: '0.5rem',
|
||||
fontFamily: "'JetBrains Mono', monospace", fontSize: '0.8rem',
|
||||
backgroundColor: 'var(--ctp-base)', border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.4rem', color: 'var(--ctp-text)', resize: 'vertical',
|
||||
width: "100%",
|
||||
minHeight: 200,
|
||||
padding: "0.5rem",
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-base)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.5rem",
|
||||
color: "var(--ctp-text)",
|
||||
resize: "vertical",
|
||||
}}
|
||||
/>
|
||||
{jsonError && <div style={{ color: 'var(--ctp-red)', fontSize: '0.8rem', marginTop: '0.25rem' }}>{jsonError}</div>}
|
||||
{jsonError && (
|
||||
<div
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
fontSize: "var(--font-table)",
|
||||
marginTop: "0.25rem",
|
||||
}}
|
||||
>
|
||||
{jsonError}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
@@ -199,11 +297,17 @@ export function PropertiesTab({ item, onReload, isEditor }: PropertiesTabProps)
|
||||
}
|
||||
|
||||
const tabBtn: React.CSSProperties = {
|
||||
padding: '0.25rem 0.5rem', fontSize: '0.8rem', border: 'none', borderRadius: '0.3rem',
|
||||
backgroundColor: 'var(--ctp-surface0)', color: 'var(--ctp-subtext1)', cursor: 'pointer',
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
cursor: "pointer",
|
||||
};
|
||||
|
||||
const activeTabBtn: React.CSSProperties = {
|
||||
...tabBtn,
|
||||
backgroundColor: 'var(--ctp-surface1)', color: 'var(--ctp-mauve)',
|
||||
backgroundColor: "var(--ctp-surface1)",
|
||||
color: "var(--ctp-mauve)",
|
||||
};
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { useState, useEffect } from 'react';
|
||||
import { get, post } from '../../api/client';
|
||||
import type { Revision, RevisionComparison } from '../../api/types';
|
||||
import { useState, useEffect } from "react";
|
||||
import { Download } from "lucide-react";
|
||||
import { get, post } from "../../api/client";
|
||||
import type { Revision, RevisionComparison } from "../../api/types";
|
||||
|
||||
interface RevisionsTabProps {
|
||||
partNumber: string;
|
||||
@@ -8,28 +9,35 @@ interface RevisionsTabProps {
|
||||
}
|
||||
|
||||
const statusColors: Record<string, string> = {
|
||||
draft: 'var(--ctp-overlay1)',
|
||||
review: 'var(--ctp-yellow)',
|
||||
released: 'var(--ctp-green)',
|
||||
obsolete: 'var(--ctp-red)',
|
||||
draft: "var(--ctp-overlay1)",
|
||||
review: "var(--ctp-yellow)",
|
||||
released: "var(--ctp-green)",
|
||||
obsolete: "var(--ctp-red)",
|
||||
};
|
||||
|
||||
function formatDate(s: string) {
|
||||
if (!s) return '';
|
||||
return new Date(s).toLocaleDateString('en-US', { year: 'numeric', month: 'short', day: 'numeric' });
|
||||
if (!s) return "";
|
||||
return new Date(s).toLocaleDateString("en-US", {
|
||||
year: "numeric",
|
||||
month: "short",
|
||||
day: "numeric",
|
||||
});
|
||||
}
|
||||
|
||||
export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
const [revisions, setRevisions] = useState<Revision[]>([]);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [compareFrom, setCompareFrom] = useState('');
|
||||
const [compareTo, setCompareTo] = useState('');
|
||||
const [compareFrom, setCompareFrom] = useState("");
|
||||
const [compareTo, setCompareTo] = useState("");
|
||||
const [comparison, setComparison] = useState<RevisionComparison | null>(null);
|
||||
|
||||
const load = () => {
|
||||
setLoading(true);
|
||||
get<Revision[]>(`/api/items/${encodeURIComponent(partNumber)}/revisions`)
|
||||
.then((r) => { setRevisions(r); setLoading(false); })
|
||||
.then((r) => {
|
||||
setRevisions(r);
|
||||
setLoading(false);
|
||||
})
|
||||
.catch(() => setLoading(false));
|
||||
};
|
||||
|
||||
@@ -39,97 +47,177 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
if (!compareFrom || !compareTo) return;
|
||||
try {
|
||||
const result = await get<RevisionComparison>(
|
||||
`/api/items/${encodeURIComponent(partNumber)}/revisions/compare?from=${compareFrom}&to=${compareTo}`
|
||||
`/api/items/${encodeURIComponent(partNumber)}/revisions/compare?from=${compareFrom}&to=${compareTo}`,
|
||||
);
|
||||
setComparison(result);
|
||||
} catch (e) {
|
||||
alert(e instanceof Error ? e.message : 'Compare failed');
|
||||
alert(e instanceof Error ? e.message : "Compare failed");
|
||||
}
|
||||
};
|
||||
|
||||
const handleStatusChange = async (rev: number, status: string) => {
|
||||
try {
|
||||
await fetch(`/api/items/${encodeURIComponent(partNumber)}/revisions/${rev}`, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
credentials: 'include',
|
||||
body: JSON.stringify({ status }),
|
||||
});
|
||||
await fetch(
|
||||
`/api/items/${encodeURIComponent(partNumber)}/revisions/${rev}`,
|
||||
{
|
||||
method: "PATCH",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
credentials: "include",
|
||||
body: JSON.stringify({ status }),
|
||||
},
|
||||
);
|
||||
load();
|
||||
} catch (e) {
|
||||
alert(e instanceof Error ? e.message : 'Status update failed');
|
||||
alert(e instanceof Error ? e.message : "Status update failed");
|
||||
}
|
||||
};
|
||||
|
||||
const handleRollback = async (rev: number) => {
|
||||
if (!confirm(`Rollback to revision ${rev}? This creates a new revision with data from rev ${rev}.`)) return;
|
||||
const comment = prompt('Rollback comment:') ?? `Rollback to rev ${rev}`;
|
||||
if (
|
||||
!confirm(
|
||||
`Rollback to revision ${rev}? This creates a new revision with data from rev ${rev}.`,
|
||||
)
|
||||
)
|
||||
return;
|
||||
const comment = prompt("Rollback comment:") ?? `Rollback to rev ${rev}`;
|
||||
try {
|
||||
await post(`/api/items/${encodeURIComponent(partNumber)}/revisions/${rev}/rollback`, { comment });
|
||||
await post(
|
||||
`/api/items/${encodeURIComponent(partNumber)}/revisions/${rev}/rollback`,
|
||||
{ comment },
|
||||
);
|
||||
load();
|
||||
} catch (e) {
|
||||
alert(e instanceof Error ? e.message : 'Rollback failed');
|
||||
alert(e instanceof Error ? e.message : "Rollback failed");
|
||||
}
|
||||
};
|
||||
|
||||
if (loading) return <div style={{ color: 'var(--ctp-subtext0)' }}>Loading revisions...</div>;
|
||||
if (loading)
|
||||
return (
|
||||
<div style={{ color: "var(--ctp-subtext0)" }}>Loading revisions...</div>
|
||||
);
|
||||
|
||||
const selectStyle: React.CSSProperties = {
|
||||
padding: '0.25rem 0.4rem', fontSize: '0.8rem',
|
||||
backgroundColor: 'var(--ctp-surface0)', border: '1px solid var(--ctp-surface1)',
|
||||
borderRadius: '0.3rem', color: 'var(--ctp-text)',
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
border: "1px solid var(--ctp-surface1)",
|
||||
borderRadius: "0.25rem",
|
||||
color: "var(--ctp-text)",
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* Compare controls */}
|
||||
<div style={{ display: 'flex', gap: '0.5rem', alignItems: 'center', marginBottom: '0.75rem' }}>
|
||||
<select value={compareFrom} onChange={(e) => setCompareFrom(e.target.value)} style={selectStyle}>
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
gap: "0.5rem",
|
||||
alignItems: "center",
|
||||
marginBottom: "0.75rem",
|
||||
}}
|
||||
>
|
||||
<select
|
||||
value={compareFrom}
|
||||
onChange={(e) => setCompareFrom(e.target.value)}
|
||||
style={selectStyle}
|
||||
>
|
||||
<option value="">From rev...</option>
|
||||
{revisions.map((r) => <option key={r.id} value={r.revision_number}>Rev {r.revision_number}</option>)}
|
||||
{revisions.map((r) => (
|
||||
<option key={r.id} value={r.revision_number}>
|
||||
Rev {r.revision_number}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<select value={compareTo} onChange={(e) => setCompareTo(e.target.value)} style={selectStyle}>
|
||||
<select
|
||||
value={compareTo}
|
||||
onChange={(e) => setCompareTo(e.target.value)}
|
||||
style={selectStyle}
|
||||
>
|
||||
<option value="">To rev...</option>
|
||||
{revisions.map((r) => <option key={r.id} value={r.revision_number}>Rev {r.revision_number}</option>)}
|
||||
{revisions.map((r) => (
|
||||
<option key={r.id} value={r.revision_number}>
|
||||
Rev {r.revision_number}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
<button onClick={() => void handleCompare()} disabled={!compareFrom || !compareTo} style={{
|
||||
padding: '0.25rem 0.5rem', fontSize: '0.8rem', border: 'none', borderRadius: '0.3rem',
|
||||
backgroundColor: 'var(--ctp-mauve)', color: 'var(--ctp-crust)', cursor: 'pointer',
|
||||
opacity: (!compareFrom || !compareTo) ? 0.5 : 1,
|
||||
}}>
|
||||
<button
|
||||
onClick={() => void handleCompare()}
|
||||
disabled={!compareFrom || !compareTo}
|
||||
style={{
|
||||
padding: "0.25rem 0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "var(--ctp-mauve)",
|
||||
color: "var(--ctp-crust)",
|
||||
cursor: "pointer",
|
||||
opacity: !compareFrom || !compareTo ? 0.5 : 1,
|
||||
}}
|
||||
>
|
||||
Compare
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Compare results */}
|
||||
{comparison && (
|
||||
<div style={{
|
||||
padding: '0.5rem', backgroundColor: 'var(--ctp-surface0)', borderRadius: '0.4rem',
|
||||
fontSize: '0.8rem', marginBottom: '0.75rem', fontFamily: "'JetBrains Mono', monospace",
|
||||
}}>
|
||||
<div
|
||||
style={{
|
||||
padding: "0.5rem",
|
||||
backgroundColor: "var(--ctp-surface0)",
|
||||
borderRadius: "0.5rem",
|
||||
fontSize: "var(--font-table)",
|
||||
marginBottom: "0.75rem",
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
}}
|
||||
>
|
||||
{comparison.status_changed && (
|
||||
<div>Status: <span style={{ color: 'var(--ctp-red)' }}>{comparison.status_changed.from}</span> → <span style={{ color: 'var(--ctp-green)' }}>{comparison.status_changed.to}</span></div>
|
||||
<div>
|
||||
Status:{" "}
|
||||
<span style={{ color: "var(--ctp-red)" }}>
|
||||
{comparison.status_changed.from}
|
||||
</span>{" "}
|
||||
→{" "}
|
||||
<span style={{ color: "var(--ctp-green)" }}>
|
||||
{comparison.status_changed.to}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
{comparison.file_changed && (
|
||||
<div style={{ color: "var(--ctp-yellow)" }}>File changed</div>
|
||||
)}
|
||||
{comparison.file_changed && <div style={{ color: 'var(--ctp-yellow)' }}>File changed</div>}
|
||||
{Object.entries(comparison.added).map(([k, v]) => (
|
||||
<div key={k} style={{ color: 'var(--ctp-green)' }}>+ {k}: {String(v)}</div>
|
||||
<div key={k} style={{ color: "var(--ctp-green)" }}>
|
||||
+ {k}: {String(v)}
|
||||
</div>
|
||||
))}
|
||||
{Object.entries(comparison.removed).map(([k, v]) => (
|
||||
<div key={k} style={{ color: 'var(--ctp-red)' }}>- {k}: {String(v)}</div>
|
||||
<div key={k} style={{ color: "var(--ctp-red)" }}>
|
||||
- {k}: {String(v)}
|
||||
</div>
|
||||
))}
|
||||
{Object.entries(comparison.changed).map(([k, c]) => (
|
||||
<div key={k} style={{ color: 'var(--ctp-yellow)' }}>~ {k}: {String(c.from)} → {String(c.to)}</div>
|
||||
<div key={k} style={{ color: "var(--ctp-yellow)" }}>
|
||||
~ {k}: {String(c.from)} → {String(c.to)}
|
||||
</div>
|
||||
))}
|
||||
{!comparison.status_changed && !comparison.file_changed &&
|
||||
Object.keys(comparison.added).length === 0 && Object.keys(comparison.removed).length === 0 &&
|
||||
Object.keys(comparison.changed).length === 0 && (
|
||||
<div style={{ color: 'var(--ctp-subtext0)' }}>No differences</div>
|
||||
)}
|
||||
{!comparison.status_changed &&
|
||||
!comparison.file_changed &&
|
||||
Object.keys(comparison.added).length === 0 &&
|
||||
Object.keys(comparison.removed).length === 0 &&
|
||||
Object.keys(comparison.changed).length === 0 && (
|
||||
<div style={{ color: "var(--ctp-subtext0)" }}>No differences</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Revisions table */}
|
||||
<table style={{ width: '100%', borderCollapse: 'collapse', fontSize: '0.8rem' }}>
|
||||
<table
|
||||
style={{
|
||||
width: "100%",
|
||||
borderCollapse: "collapse",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
<thead>
|
||||
<tr>
|
||||
<th style={thStyle}>Rev</th>
|
||||
@@ -143,17 +231,32 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
</thead>
|
||||
<tbody>
|
||||
{revisions.map((rev, idx) => (
|
||||
<tr key={rev.id} style={{ backgroundColor: idx % 2 === 0 ? 'var(--ctp-base)' : 'var(--ctp-surface0)' }}>
|
||||
<tr
|
||||
key={rev.id}
|
||||
style={{
|
||||
backgroundColor:
|
||||
idx % 2 === 0 ? "var(--ctp-base)" : "var(--ctp-surface0)",
|
||||
}}
|
||||
>
|
||||
<td style={tdStyle}>{rev.revision_number}</td>
|
||||
<td style={tdStyle}>
|
||||
{isEditor ? (
|
||||
<select
|
||||
value={rev.status}
|
||||
onChange={(e) => void handleStatusChange(rev.revision_number, e.target.value)}
|
||||
onChange={(e) =>
|
||||
void handleStatusChange(
|
||||
rev.revision_number,
|
||||
e.target.value,
|
||||
)
|
||||
}
|
||||
style={{
|
||||
padding: '0.1rem 0.3rem', fontSize: '0.75rem', border: 'none', borderRadius: '0.3rem',
|
||||
backgroundColor: 'transparent', color: statusColors[rev.status] ?? 'var(--ctp-text)',
|
||||
cursor: 'pointer',
|
||||
padding: "0.25rem 0.25rem",
|
||||
fontSize: "0.75rem",
|
||||
border: "none",
|
||||
borderRadius: "0.25rem",
|
||||
backgroundColor: "transparent",
|
||||
color: statusColors[rev.status] ?? "var(--ctp-text)",
|
||||
cursor: "pointer",
|
||||
}}
|
||||
>
|
||||
<option value="draft">draft</option>
|
||||
@@ -162,27 +265,58 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
<option value="obsolete">obsolete</option>
|
||||
</select>
|
||||
) : (
|
||||
<span style={{ color: statusColors[rev.status] ?? 'var(--ctp-text)' }}>{rev.status}</span>
|
||||
<span
|
||||
style={{
|
||||
color: statusColors[rev.status] ?? "var(--ctp-text)",
|
||||
}}
|
||||
>
|
||||
{rev.status}
|
||||
</span>
|
||||
)}
|
||||
</td>
|
||||
<td style={tdStyle}>{formatDate(rev.created_at)}</td>
|
||||
<td style={tdStyle}>{rev.created_by ?? '—'}</td>
|
||||
<td style={{ ...tdStyle, maxWidth: 150, overflow: 'hidden', textOverflow: 'ellipsis' }}>{rev.comment ?? ''}</td>
|
||||
<td style={tdStyle}>{rev.created_by ?? "—"}</td>
|
||||
<td
|
||||
style={{
|
||||
...tdStyle,
|
||||
maxWidth: 150,
|
||||
overflow: "hidden",
|
||||
textOverflow: "ellipsis",
|
||||
}}
|
||||
>
|
||||
{rev.comment ?? ""}
|
||||
</td>
|
||||
<td style={tdStyle}>
|
||||
{rev.file_key ? (
|
||||
<button
|
||||
onClick={() => { window.location.href = `/api/items/${encodeURIComponent(partNumber)}/file/${rev.revision_number}`; }}
|
||||
style={{ background: 'none', border: 'none', color: 'var(--ctp-sapphire)', cursor: 'pointer', fontSize: '0.8rem' }}
|
||||
onClick={() => {
|
||||
window.location.href = `/api/items/${encodeURIComponent(partNumber)}/file/${rev.revision_number}`;
|
||||
}}
|
||||
style={{
|
||||
background: "none",
|
||||
border: "none",
|
||||
color: "var(--ctp-sapphire)",
|
||||
cursor: "pointer",
|
||||
display: "inline-flex",
|
||||
}}
|
||||
>
|
||||
↓
|
||||
<Download size={14} />
|
||||
</button>
|
||||
) : '—'}
|
||||
) : (
|
||||
"—"
|
||||
)}
|
||||
</td>
|
||||
{isEditor && (
|
||||
<td style={tdStyle}>
|
||||
<button
|
||||
onClick={() => void handleRollback(rev.revision_number)}
|
||||
style={{ background: 'none', border: 'none', color: 'var(--ctp-peach)', cursor: 'pointer', fontSize: '0.75rem' }}
|
||||
style={{
|
||||
background: "none",
|
||||
border: "none",
|
||||
color: "var(--ctp-peach)",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.75rem",
|
||||
}}
|
||||
title="Rollback to this revision"
|
||||
>
|
||||
Rollback
|
||||
@@ -198,10 +332,18 @@ export function RevisionsTab({ partNumber, isEditor }: RevisionsTabProps) {
|
||||
}
|
||||
|
||||
const thStyle: React.CSSProperties = {
|
||||
padding: '0.3rem 0.5rem', textAlign: 'left', borderBottom: '1px solid var(--ctp-surface1)',
|
||||
color: 'var(--ctp-subtext1)', fontWeight: 600, fontSize: '0.7rem', textTransform: 'uppercase', letterSpacing: '0.05em',
|
||||
padding: "0.25rem 0.5rem",
|
||||
textAlign: "left",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontWeight: 600,
|
||||
fontSize: "var(--font-sm)",
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
};
|
||||
|
||||
const tdStyle: React.CSSProperties = {
|
||||
padding: '0.25rem 0.5rem', borderBottom: '1px solid var(--ctp-surface0)', whiteSpace: 'nowrap',
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderBottom: "1px solid var(--ctp-surface0)",
|
||||
whiteSpace: "nowrap",
|
||||
};
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { useState, useEffect } from 'react';
|
||||
import { get } from '../../api/client';
|
||||
import type { WhereUsedEntry } from '../../api/types';
|
||||
import { useState, useEffect } from "react";
|
||||
import { get } from "../../api/client";
|
||||
import type { WhereUsedEntry } from "../../api/types";
|
||||
|
||||
interface WhereUsedTabProps {
|
||||
partNumber: string;
|
||||
@@ -12,20 +12,35 @@ export function WhereUsedTab({ partNumber }: WhereUsedTabProps) {
|
||||
|
||||
useEffect(() => {
|
||||
setLoading(true);
|
||||
get<WhereUsedEntry[]>(`/api/items/${encodeURIComponent(partNumber)}/bom/where-used`)
|
||||
get<WhereUsedEntry[]>(
|
||||
`/api/items/${encodeURIComponent(partNumber)}/bom/where-used`,
|
||||
)
|
||||
.then(setEntries)
|
||||
.catch(() => setEntries([]))
|
||||
.finally(() => setLoading(false));
|
||||
}, [partNumber]);
|
||||
|
||||
if (loading) return <div style={{ color: 'var(--ctp-subtext0)' }}>Loading where-used...</div>;
|
||||
if (loading)
|
||||
return (
|
||||
<div style={{ color: "var(--ctp-subtext0)" }}>Loading where-used...</div>
|
||||
);
|
||||
|
||||
if (entries.length === 0) {
|
||||
return <div style={{ color: 'var(--ctp-subtext0)', padding: '1rem' }}>Not used in any assemblies.</div>;
|
||||
return (
|
||||
<div style={{ color: "var(--ctp-subtext0)", padding: "1rem" }}>
|
||||
Not used in any assemblies.
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<table style={{ width: '100%', borderCollapse: 'collapse', fontSize: '0.8rem' }}>
|
||||
<table
|
||||
style={{
|
||||
width: "100%",
|
||||
borderCollapse: "collapse",
|
||||
fontSize: "var(--font-table)",
|
||||
}}
|
||||
>
|
||||
<thead>
|
||||
<tr>
|
||||
<th style={thStyle}>Parent PN</th>
|
||||
@@ -36,13 +51,25 @@ export function WhereUsedTab({ partNumber }: WhereUsedTabProps) {
|
||||
</thead>
|
||||
<tbody>
|
||||
{entries.map((e, idx) => (
|
||||
<tr key={e.id} style={{ backgroundColor: idx % 2 === 0 ? 'var(--ctp-base)' : 'var(--ctp-surface0)' }}>
|
||||
<td style={{ ...tdStyle, fontFamily: "'JetBrains Mono', monospace", color: 'var(--ctp-peach)' }}>
|
||||
<tr
|
||||
key={e.id}
|
||||
style={{
|
||||
backgroundColor:
|
||||
idx % 2 === 0 ? "var(--ctp-base)" : "var(--ctp-surface0)",
|
||||
}}
|
||||
>
|
||||
<td
|
||||
style={{
|
||||
...tdStyle,
|
||||
fontFamily: "'JetBrains Mono', monospace",
|
||||
color: "var(--ctp-peach)",
|
||||
}}
|
||||
>
|
||||
{e.parent_part_number}
|
||||
</td>
|
||||
<td style={tdStyle}>{e.parent_description}</td>
|
||||
<td style={tdStyle}>{e.rel_type}</td>
|
||||
<td style={tdStyle}>{e.quantity ?? '—'}</td>
|
||||
<td style={tdStyle}>{e.quantity ?? "—"}</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
@@ -51,10 +78,18 @@ export function WhereUsedTab({ partNumber }: WhereUsedTabProps) {
|
||||
}
|
||||
|
||||
const thStyle: React.CSSProperties = {
|
||||
padding: '0.3rem 0.5rem', textAlign: 'left', borderBottom: '1px solid var(--ctp-surface1)',
|
||||
color: 'var(--ctp-subtext1)', fontWeight: 600, fontSize: '0.7rem', textTransform: 'uppercase', letterSpacing: '0.05em',
|
||||
padding: "0.25rem 0.5rem",
|
||||
textAlign: "left",
|
||||
borderBottom: "1px solid var(--ctp-surface1)",
|
||||
color: "var(--ctp-subtext1)",
|
||||
fontWeight: 600,
|
||||
fontSize: "var(--font-sm)",
|
||||
textTransform: "uppercase",
|
||||
letterSpacing: "0.05em",
|
||||
};
|
||||
|
||||
const tdStyle: React.CSSProperties = {
|
||||
padding: '0.25rem 0.5rem', borderBottom: '1px solid var(--ctp-surface0)', whiteSpace: 'nowrap',
|
||||
padding: "0.25rem 0.5rem",
|
||||
borderBottom: "1px solid var(--ctp-surface0)",
|
||||
whiteSpace: "nowrap",
|
||||
};
|
||||
|
||||
@@ -57,7 +57,7 @@ export function AuditPage() {
|
||||
style={{
|
||||
color: "var(--ctp-red)",
|
||||
padding: "0.5rem",
|
||||
fontSize: "0.85rem",
|
||||
fontSize: "var(--font-body)",
|
||||
}}
|
||||
>
|
||||
Error: {error}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user