Consolidate documentation and update backend services

- Reorganize docs into 'Core deployment guides' and 'Setup and configuration' subdirectories
- Consolidate redundant documentation files (ACR, pipelines, deployment guides)
- Add documentation consolidation plan
- Update backend database factory and logger services
- Update migration script and docker-compose configurations
- Add PostgreSQL setup script
This commit is contained in:
2026-01-22 22:45:54 +01:00
parent 18aec4ad80
commit f4399a8e4e
49 changed files with 1320 additions and 7243 deletions

View File

@@ -17,6 +17,8 @@ const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const SQLITE_CACHE_DB = join(__dirname, '../../data/cmdb-cache.db');
// Note: Legacy support - old SQLite setups may have had separate classifications.db file
// Current setup uses a single database file for all data
const SQLITE_CLASSIFICATIONS_DB = join(__dirname, '../../data/classifications.db');
async function migrate() {

View File

@@ -27,7 +27,7 @@ export function createDatabaseAdapter(dbType?: string, dbPath?: string, allowClo
// Try to construct from individual components
const host = process.env.DATABASE_HOST || 'localhost';
const port = process.env.DATABASE_PORT || '5432';
const name = process.env.DATABASE_NAME || 'cmdb';
const name = process.env.DATABASE_NAME || 'cmdb_insight';
const user = process.env.DATABASE_USER || 'cmdb';
const password = process.env.DATABASE_PASSWORD || '';
// Azure PostgreSQL requires SSL - always use sslmode=require for Azure
@@ -51,35 +51,12 @@ export function createDatabaseAdapter(dbType?: string, dbPath?: string, allowClo
}
/**
* Create a database adapter for the classifications database
* Create a database adapter for classifications and session state
*
* Uses the same database as the main cache. All data (CMDB cache,
* classification history, and session state) is stored in a single database.
*/
export function createClassificationsDatabaseAdapter(): DatabaseAdapter {
const type = process.env.DATABASE_TYPE || 'sqlite';
const databaseUrl = process.env.CLASSIFICATIONS_DATABASE_URL || process.env.DATABASE_URL;
if (type === 'postgres' || type === 'postgresql') {
if (!databaseUrl) {
// Try to construct from individual components
const host = process.env.DATABASE_HOST || 'localhost';
const port = process.env.DATABASE_PORT || '5432';
const name = process.env.CLASSIFICATIONS_DATABASE_NAME || process.env.DATABASE_NAME || 'cmdb';
const user = process.env.DATABASE_USER || 'cmdb';
const password = process.env.DATABASE_PASSWORD || '';
// Azure PostgreSQL requires SSL - always use sslmode=require for Azure
const isAzure = host.includes('.postgres.database.azure.com');
const ssl = (process.env.DATABASE_SSL === 'true' || isAzure) ? '?sslmode=require' : '';
const constructedUrl = `postgresql://${user}:${password}@${host}:${port}/${name}${ssl}`;
logger.info('Creating PostgreSQL adapter for classifications with constructed connection string');
return new PostgresAdapter(constructedUrl);
}
logger.info('Creating PostgreSQL adapter for classifications');
return new PostgresAdapter(databaseUrl);
}
// Default to SQLite
const defaultPath = join(__dirname, '../../data/classifications.db');
logger.info(`Creating SQLite adapter for classifications with path: ${defaultPath}`);
return new SqliteAdapter(defaultPath);
// Always use the same database adapter as the main cache
return createDatabaseAdapter();
}

View File

@@ -34,7 +34,16 @@ export const logger = winston.createLogger({
// Only add file logging if we're in production AND have write permissions
// In Azure App Service, console logging is automatically captured, so file logging is optional
if (config.isProduction && !process.env.AZURE_APP_SERVICE) {
// Detect Azure App Service environment
const isAzureAppService = !!(
process.env.AZURE_APP_SERVICE ||
process.env.WEBSITE_SITE_NAME ||
process.env.WEBSITE_INSTANCE_ID ||
process.env.AzureWebJobsStorage
);
if (config.isProduction && !isAzureAppService) {
// For non-Azure environments, try to use logs/ directory
const logDir = path.join(__dirname, '../../logs');

View File

@@ -3,7 +3,7 @@ services:
image: postgres:15-alpine
container_name: cmdb-postgres-dev
environment:
POSTGRES_DB: cmdb_cache
POSTGRES_DB: cmdb_insight
POSTGRES_USER: cmdb
POSTGRES_PASSWORD: cmdb-dev
ports:

View File

@@ -2,7 +2,7 @@ services:
postgres:
image: postgres:15-alpine
environment:
POSTGRES_DB: cmdb
POSTGRES_DB: cmdb_insight
POSTGRES_USER: cmdb
POSTGRES_PASSWORD: cmdb-dev
ports:
@@ -28,7 +28,7 @@ services:
- DATABASE_TYPE=postgres
- DATABASE_HOST=postgres
- DATABASE_PORT=5432
- DATABASE_NAME=cmdb
- DATABASE_NAME=cmdb_insight
- DATABASE_USER=cmdb
- DATABASE_PASSWORD=cmdb-dev
# Optional Jira/AI variables (set in .env file or environment)

View File

@@ -1,119 +0,0 @@
# Authentication System Implementation Status
## ✅ Completed Features
### Backend
- ✅ Database schema with users, roles, permissions, sessions, user_settings, email_tokens tables
- ✅ User service (CRUD, password hashing, email verification, password reset)
- ✅ Role service (dynamic role and permission management)
- ✅ Auth service (local auth + OAuth with database-backed sessions)
- ✅ Email service (Nodemailer with SMTP)
- ✅ Encryption service (AES-256-GCM for sensitive data)
- ✅ User settings service (Jira PAT, AI features, API keys)
- ✅ Authorization middleware (requireAuth, requireRole, requirePermission)
- ✅ All API routes protected with authentication
- ✅ Auth routes (login, logout, password reset, email verification, invitations)
- ✅ User management routes (admin only)
- ✅ Role management routes
- ✅ User settings routes
- ✅ Profile routes
### Frontend
- ✅ Auth store extended with roles, permissions, local auth support
- ✅ Permission hooks (useHasPermission, useHasRole, usePermissions)
- ✅ ProtectedRoute component
- ✅ Login component (local login + OAuth choice)
- ✅ ForgotPassword component
- ✅ ResetPassword component
- ✅ AcceptInvitation component
- ✅ UserManagement component (admin)
- ✅ RoleManagement component (admin)
- ✅ UserSettings component
- ✅ Profile component
- ✅ UserMenu with logout and profile/settings links
- ✅ Feature gating based on permissions
## 🔧 Configuration Required
### Environment Variables
**Required for local authentication:**
```env
LOCAL_AUTH_ENABLED=true
```
**Required for email functionality:**
```env
SMTP_HOST=smtp.example.com
SMTP_PORT=587
SMTP_SECURE=false
SMTP_USER=your-email@example.com
SMTP_PASSWORD=your-password
SMTP_FROM=noreply@example.com
```
**Required for encryption:**
```env
ENCRYPTION_KEY=your-32-byte-encryption-key-base64
```
**Optional - Initial admin user:**
```env
ADMIN_EMAIL=admin@example.com
ADMIN_PASSWORD=SecurePassword123!
ADMIN_USERNAME=admin
ADMIN_DISPLAY_NAME=Administrator
```
**Password requirements:**
```env
PASSWORD_MIN_LENGTH=8
PASSWORD_REQUIRE_UPPERCASE=true
PASSWORD_REQUIRE_LOWERCASE=true
PASSWORD_REQUIRE_NUMBER=true
PASSWORD_REQUIRE_SPECIAL=false
```
**Session duration:**
```env
SESSION_DURATION_HOURS=24
```
## 📝 Notes
### JIRA_AUTH Settings
- `JIRA_PAT` can be removed from global env - users configure their own PAT in settings
- `JIRA_OAUTH_CLIENT_ID` and `JIRA_OAUTH_CLIENT_SECRET` are still needed for OAuth flow
- `JIRA_HOST` and `JIRA_SCHEMA_ID` are still needed (infrastructure settings)
### AI API Keys
- `ANTHROPIC_API_KEY` can be removed from global env - users configure their own keys
- `OPENAI_API_KEY` can be removed from global env - users configure their own keys
- `TAVILY_API_KEY` can be removed from global env - users configure their own keys
- These are now stored per-user in the `user_settings` table (encrypted)
### Authentication Flow
1. On first run, migrations create database tables
2. If `ADMIN_EMAIL` and `ADMIN_PASSWORD` are set, initial admin user is created
3. Once users exist, authentication is automatically required
4. Users can log in with email/password (local auth) or OAuth (if configured)
5. User menu shows logged-in user with links to Profile and Settings
6. Logout is available for all authenticated users
## 🚀 Next Steps
1. Set `LOCAL_AUTH_ENABLED=true` in environment
2. Configure SMTP settings for email functionality
3. Generate encryption key: `openssl rand -base64 32`
4. Set initial admin credentials (optional)
5. Run the application - migrations will run automatically
6. Log in with admin account
7. Create additional users via User Management
8. Configure roles and permissions as needed
## ⚠️ Important
- Once users exist in the database, authentication is **automatically required**
- Service account mode only works if no users exist AND local auth is not enabled
- All API routes are protected - unauthenticated requests return 401
- User-specific settings (Jira PAT, AI keys) are encrypted at rest

View File

@@ -1,207 +0,0 @@
# Azure Container Registry - Domain Name Label Scope
## Wat is Domain Name Label Scope?
**Domain Name Label (DNL) Scope** is een security feature van Azure Container Registry die voorkomt dat iemand anders dezelfde DNS naam kan gebruiken als je registry wordt verwijderd (subdomain takeover prevention).
## Opties
### 1. **Unsecure** (Aanbevolen voor simpele setup) ⭐
**DNS Format:** `registryname.azurecr.io`
**Voorbeeld:**
- Registry naam: `zuyderlandcmdbacr`
- DNS naam: `zuyderlandcmdbacr.azurecr.io`
**Voordelen:**
- ✅ Eenvoudig en voorspelbaar
- ✅ Geen hash in de naam
- ✅ Makkelijk te onthouden en configureren
**Nadelen:**
- ❌ Minder security (maar meestal voldoende voor interne tools)
**Wanneer gebruiken:**
- ✅ Simpele setup
- ✅ Interne/corporate omgeving
- ✅ Je wilt een voorspelbare DNS naam
---
### 2. **Resource Group Reuse** (Aanbevolen voor security) 🔒
**DNS Format:** `registryname-hash.azurecr.io`
**Voorbeeld:**
- Registry naam: `zuyderlandcmdbacr`
- DNS naam: `zuyderlandcmdbacr-abc123.azurecr.io` (met unieke hash)
**Voordelen:**
- ✅ Extra security layer
- ✅ Consistent binnen resource group
- ✅ Voorkomt subdomain takeover
**Nadelen:**
- ❌ Hash in de naam (minder voorspelbaar)
- ❌ Moet alle configuraties aanpassen met volledige DNS naam
**Wanneer gebruiken:**
- ✅ Productie omgevingen
- ✅ Security is belangrijk
- ✅ Je wilt extra bescherming
---
### 3. **Subscription Reuse**
**DNS Format:** `registryname-hash.azurecr.io` (hash gebaseerd op subscription)
**Wanneer gebruiken:**
- Als je meerdere resource groups hebt binnen dezelfde subscription
- Je wilt consistentie binnen de subscription
---
### 4. **Tenant Reuse**
**DNS Format:** `registryname-hash.azurecr.io` (hash gebaseerd op tenant)
**Wanneer gebruiken:**
- Als je meerdere subscriptions hebt binnen dezelfde tenant
- Je wilt consistentie binnen de tenant
---
### 5. **No Reuse**
**DNS Format:** `registryname-uniquehash.azurecr.io` (altijd unieke hash)
**Wanneer gebruiken:**
- Maximale security vereist
- Je wilt absoluut geen risico op DNS conflicts
---
## 🎯 Aanbeveling voor Jouw Situatie
**Voor CMDB Insight (20 gebruikers, corporate omgeving):**
### Optie A: **"Unsecure"** (Aanbevolen) ⭐
**Waarom:**
- ✅ Eenvoudigste setup
- ✅ Voorspelbare DNS naam
- ✅ Geen configuratie wijzigingen nodig
- ✅ Voldoende voor interne corporate tool
**DNS naam wordt:** `zuyderlandcmdbacr.azurecr.io`
**Configuratie:**
```yaml
# azure-pipelines.yml
acrName: 'zuyderlandcmdbacr' # Simpel, zonder hash
```
---
### Optie B: **"Resource Group Reuse"** (Als je extra security wilt) 🔒
**Waarom:**
- ✅ Extra security layer
- ✅ Voorkomt subdomain takeover
- ✅ Consistent binnen resource group
**DNS naam wordt:** `zuyderlandcmdbacr-abc123.azurecr.io` (met hash)
**⚠️ Belangrijk:** Je moet dan alle configuraties aanpassen!
**Configuratie wijzigingen nodig:**
```yaml
# azure-pipelines.yml
acrName: 'zuyderlandcmdbacr-abc123' # Met hash!
```
```yaml
# docker-compose.prod.acr.yml
image: zuyderlandcmdbacr-abc123.azurecr.io/cmdb-insight/backend:latest
```
```bash
# scripts/build-and-push-azure.sh
REGISTRY="zuyderlandcmdbacr-abc123.azurecr.io" # Met hash!
```
---
## ⚠️ Belangrijke Waarschuwingen
### 1. **Permanente Keuze**
De DNL Scope keuze is **permanent** en kan **niet meer worden gewijzigd** na aanmaken van de registry!
### 2. **Geen Streepjes in Registry Naam**
Als je een DNL Scope met hash gebruikt, mag je **geen streepjes (`-`)** gebruiken in de registry naam, omdat de hash zelf al een streepje gebruikt als scheidingsteken.
**Goed:** `zuyderlandcmdbacr`
**Fout:** `zuyderland-cmdb-acr` (streepje conflict met hash)
### 3. **Configuratie Aanpassingen**
Als je een hash gebruikt, moet je **alle configuraties aanpassen** met de volledige DNS naam (inclusief hash).
---
## 📋 Checklist
### Als je "Unsecure" kiest:
- [ ] Registry naam zonder streepjes (bijv. `zuyderlandcmdbacr`)
- [ ] DNS naam wordt: `zuyderlandcmdbacr.azurecr.io`
- [ ] Geen configuratie wijzigingen nodig
- [ ] Gebruik `acrName: 'zuyderlandcmdbacr'` in pipeline
### Als je "Resource Group Reuse" kiest:
- [ ] Registry naam zonder streepjes (bijv. `zuyderlandcmdbacr`)
- [ ] Noteer de volledige DNS naam na aanmaken (met hash)
- [ ] Pas `azure-pipelines.yml` aan met volledige DNS naam
- [ ] Pas `docker-compose.prod.acr.yml` aan met volledige DNS naam
- [ ] Pas `scripts/build-and-push-azure.sh` aan met volledige DNS naam
---
## 🔍 DNS Naam Vinden
Na het aanmaken van de ACR, vind je de DNS naam:
**Via Azure Portal:**
1. Ga naar je Container Registry
2. Klik op **"Overview"**
3. De **"Login server"** is je DNS naam
**Via Azure CLI:**
```bash
az acr show --name zuyderlandcmdbacr --query loginServer -o tsv
```
**Output voorbeelden:**
- Unsecure: `zuyderlandcmdbacr.azurecr.io`
- Met hash: `zuyderlandcmdbacr-abc123.azurecr.io`
---
## 💡 Mijn Aanbeveling
**Voor jouw situatie (corporate tool, 20 gebruikers):**
Kies **"Unsecure"** omdat:
1. ✅ Eenvoudigste setup
2. ✅ Geen configuratie wijzigingen nodig
3. ✅ Voldoende security voor interne tool
4. ✅ Voorspelbare DNS naam
Als je later meer security nodig hebt, kun je altijd een nieuwe registry aanmaken met een andere scope (maar dan moet je wel alles migreren).
---
## 📚 Meer Informatie
- [Azure Container Registry DNL Scope Documentation](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-get-started-portal)
- [Subdomain Takeover Prevention](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-security)

View File

@@ -1,165 +0,0 @@
# Azure Container Registry Naming Recommendation
Recommendations for naming your Azure Container Registry for Zuyderland Application Services.
## 🎯 Requirements
Azure Container Registry names must:
- Be **globally unique** (across all Azure subscriptions)
- Be **5-50 characters** long
- Contain **only lowercase alphanumeric characters** (no hyphens, underscores, or special characters)
- Be **descriptive** but not too long
## 💡 Recommended Options
### Option 1: `zuyderlandacr` ⭐ **RECOMMENDED**
**Pros:**
- ✅ Clear company identification
- ✅ Short and memorable (13 characters)
- ✅ Generic enough for all Application Services apps
- ✅ Easy to type and remember
- ✅ Professional appearance
**Cons:**
- ⚠️ Might be taken if Zuyderland already has an ACR
**Usage:**
```bash
ACR_NAME="zuyderlandacr"
# Images: zuyderlandacr.azurecr.io/cmdb-insight/backend:latest
```
### Option 2: `zuyderlandsharedacr`
**Pros:**
- ✅ Clearly indicates it's a shared registry
- ✅ Company identification
- ✅ Good for documentation ("shared ACR")
**Cons:**
- ⚠️ Longer (19 characters)
- ⚠️ "shared" might be redundant (ACRs are typically shared)
**Usage:**
```bash
ACR_NAME="zuyderlandsharedacr"
# Images: zuyderlandsharedacr.azurecr.io/cmdb-insight/backend:latest
```
### Option 3: `zyldacr` (Abbreviated)
**Pros:**
- ✅ Very short (7 characters)
- ✅ Easy to type
- ✅ Less likely to be taken
**Cons:**
- ⚠️ Less clear what "zyld" means
- ⚠️ Might not be obvious it's Zuyderland
**Usage:**
```bash
ACR_NAME="zyldacr"
# Images: zyldacr.azurecr.io/cmdb-insight/backend:latest
```
### Option 4: `zuyderlandappsvcsacr`
**Pros:**
- ✅ Includes department name (Application Services)
- ✅ Very specific
**Cons:**
- ⚠️ Long (20 characters)
- ⚠️ Less flexible if other departments want to use it
- ⚠️ Harder to type
**Usage:**
```bash
ACR_NAME="zuyderlandappsvcsacr"
# Images: zuyderlandappsvcsacr.azurecr.io/cmdb-insight/backend:latest
```
## 🏆 Final Recommendation
**Use: `zuyderlandacr`**
**Reasoning:**
1. **Clear and professional**: Immediately identifies as Zuyderland
2. **Appropriate length**: Not too short (unclear) or too long (hard to type)
3. **Generic enough**: Can be used by all Application Services applications
4. **Future-proof**: Works for any department or team within Zuyderland
5. **Standard pattern**: Follows common naming convention (`companynameacr`)
## 🔍 Check Availability
Before creating, check if the name is available:
```bash
# Try to check if name exists (will fail if available, which is good)
az acr show --name zuyderlandacr --resource-group dummy-rg 2>&1 | grep -q "could not be found" && echo "Name available!" || echo "Name might be taken"
# Or try to create (will fail if taken)
az acr check-name --name zuyderlandacr
```
## 📝 Alternative if Name is Taken
If `zuyderlandacr` is already taken, try:
1. `zuyderlandacr01` - Add number suffix
2. `zuyderlandacrprod` - Add environment suffix
3. `zyldacr` - Use abbreviation
4. `zuyderlandregistry` - Use full word "registry"
5. `zuyderlandcontainers` - Use "containers" instead of "acr"
## 🎯 Naming Pattern
For consistency across Zuyderland, consider this pattern:
```
zuyderlandacr ← Shared ACR for all apps (recommended)
zuyderlandacrdev ← Development ACR (if needed)
zuyderlandacrprod ← Production ACR (if separate)
```
**But for most cases, one shared ACR (`zuyderlandacr`) is sufficient.**
## 📋 Update Your Configuration
Once you've chosen a name, update:
### 1. Setup Script
```bash
# In scripts/setup-azure-resources.sh
ACR_NAME="zuyderlandacr"
ACR_RESOURCE_GROUP="rg-shared-services" # Or rg-zuyderland-shared
```
### 2. Pipeline Variables
```yaml
# In azure-pipelines.yml
variables:
acrName: 'zuyderlandacr'
repositoryName: 'cmdb-insight'
```
### 3. Build Scripts
```bash
# In scripts/build-and-push-azure.sh
export ACR_NAME="zuyderlandacr"
```
## ✅ Checklist
- [ ] Choose ACR name: `zuyderlandacr` (recommended)
- [ ] Check name availability
- [ ] Create ACR with chosen name
- [ ] Update all configuration files
- [ ] Document name for team
- [ ] Share ACR name with other Application Services teams
---
**Recommended: `zuyderlandacr`** - Clear, professional, and reusable for all Zuyderland Application Services applications.

View File

@@ -1,205 +0,0 @@
# Azure Container Registry - Role Assignment Permissions Mode
## 🎯 Aanbeveling voor Jouw Situatie
**Voor CMDB Insight (20 gebruikers, corporate tool, productie):**
### ✅ **RBAC Registry Permissions** (Aanbevolen) ⭐
**Waarom:**
- ✅ Eenvoudiger te beheren
- ✅ Voldoende voor jouw use case
- ✅ Minder complexiteit
- ✅ Standaard keuze voor de meeste scenario's
---
## 📊 Opties Vergelijking
### Optie 1: **RBAC Registry Permissions** ⭐ **AANBEVOLEN**
**Hoe het werkt:**
- Permissions worden ingesteld op **registry niveau**
- Alle repositories binnen de registry delen dezelfde permissions
- Gebruikers hebben toegang tot alle repositories of geen
**Voordelen:**
- ✅ **Eenvoudig** - Minder complexiteit
- ✅ **Makkelijk te beheren** - Eén set permissions voor de hele registry
- ✅ **Voldoende voor de meeste scenario's** - Perfect voor jouw situatie
- ✅ **Standaard keuze** - Meest gebruikte optie
**Nadelen:**
- ❌ Minder flexibel - Kan niet per repository permissions instellen
- ❌ Alle repositories hebben dezelfde toegang
**Wanneer gebruiken:**
- ✅ **Jouw situatie** - 20 gebruikers, corporate tool
- ✅ Kleine tot middelgrote teams
- ✅ Alle repositories hebben dezelfde toegangsvereisten
- ✅ Eenvoudige permission structuur gewenst
**Voorbeeld:**
- Alle developers hebben toegang tot alle repositories
- Alle CI/CD pipelines hebben toegang tot alle repositories
- Geen per-repository verschillen nodig
---
### Optie 2: **RBAC Registry + ABAC Repository Permissions**
**Hoe het werkt:**
- Permissions op **registry niveau** (RBAC)
- **Extra** permissions op **repository niveau** (ABAC - Attribute-Based Access Control)
- Kan per repository verschillende permissions instellen
**Voordelen:**
- ✅ **Flexibeler** - Per repository permissions mogelijk
- ✅ **Granular control** - Verschillende teams kunnen verschillende repositories hebben
- ✅ **Enterprise features** - Voor complexe organisaties
**Nadelen:**
- ❌ **Complexer** - Meer configuratie nodig
- ❌ **Moeilijker te beheren** - Meerdere permission levels
- ❌ **Meer overhead** - Meer tijd nodig voor setup en onderhoud
**Wanneer gebruiken:**
- ✅ Grote organisaties met meerdere teams
- ✅ Verschillende repositories hebben verschillende toegangsvereisten
- ✅ Compliance requirements die granular control vereisen
- ✅ Multi-tenant scenarios
**Voorbeeld:**
- Team A heeft alleen toegang tot repository A
- Team B heeft alleen toegang tot repository B
- CI/CD pipeline heeft toegang tot alle repositories
- Externe partners hebben alleen toegang tot specifieke repositories
---
## 🔍 Jouw Situatie Analyse
**Jouw setup:**
- 2 repositories: `cmdb-insight/backend` en `cmdb-insight/frontend`
- 20 gebruikers (klein team)
- Corporate tool (interne gebruikers)
- Productie omgeving
**Permission vereisten:**
- ✅ Alle teamleden hebben toegang tot beide repositories
- ✅ CI/CD pipeline heeft toegang tot beide repositories
- ✅ Geen per-repository verschillen nodig
- ✅ Eenvoudige beheer gewenst
**Conclusie:** **RBAC Registry Permissions is perfect!**
---
## 📋 Checklist: Welke Keuze?
### Kies **RBAC Registry Permissions** als:
- [x] Je <50 gebruikers hebt ✅
- [x] Alle repositories dezelfde toegang hebben ✅
- [x] Je eenvoudige beheer wilt ✅
- [x] Je geen per-repository verschillen nodig hebt ✅
- [x] Je een klein tot middelgroot team hebt ✅
**→ Jouw situatie: ✅ Kies RBAC Registry Permissions!**
### Kies **RBAC Registry + ABAC Repository Permissions** als:
- [ ] Je >100 gebruikers hebt
- [ ] Verschillende repositories verschillende toegang nodig hebben
- [ ] Je granular control nodig hebt
- [ ] Je multi-tenant scenario hebt
- [ ] Je compliance requirements hebt die granular control vereisen
---
## 🔄 Kan Ik Later Wisselen?
**⚠️ Belangrijk:**
- Deze keuze is **permanent** en kan **niet meer worden gewijzigd** na aanmaken van de registry!
- Als je later ABAC nodig hebt, moet je een nieuwe registry aanmaken
**Aanbeveling:**
- Start met **RBAC Registry Permissions** (eenvoudigst)
- Als je later granular control nodig hebt, overweeg dan een nieuwe registry met ABAC
- Voor jouw situatie is RBAC Registry Permissions voldoende
---
## 💡 Permission Rollen (RBAC Registry Permissions)
Met RBAC Registry Permissions kun je deze rollen toewijzen:
### **AcrPull** (Lezen)
- Images pullen
- Voor: Developers, CI/CD pipelines
### **AcrPush** (Schrijven)
- Images pushen
- Voor: CI/CD pipelines, build servers
### **AcrDelete** (Verwijderen)
- Images verwijderen
- Voor: Administrators, cleanup scripts
### **Owner** (Volledig beheer)
- Alles + registry beheer
- Voor: Administrators
**Voor jouw situatie:**
- **Developers**: `AcrPull` (images pullen)
- **CI/CD Pipeline**: `AcrPush` (images pushen)
- **Administrators**: `Owner` (volledig beheer)
---
## 🎯 Mijn Aanbeveling
**Voor CMDB Insight:**
### ✅ **Kies RBAC Registry Permissions** ⭐
**Waarom:**
1. **Eenvoudig** - Minder complexiteit, makkelijker te beheren
2. **Voldoende** - Alle repositories hebben dezelfde toegang (wat je nodig hebt)
3. **Standaard** - Meest gebruikte optie, goed gedocumenteerd
4. **Perfect voor jouw situatie** - 20 gebruikers, 2 repositories, corporate tool
**Je hebt niet nodig:**
- ❌ Per-repository permissions (alle repositories hebben dezelfde toegang)
- ❌ Complexe permission structuur (klein team)
- ❌ Multi-tenant scenarios (corporate tool)
**Setup:**
1. Kies **RBAC Registry Permissions**
2. Wijs rollen toe aan gebruikers/groepen:
- Developers → `AcrPull`
- CI/CD → `AcrPush`
- Admins → `Owner`
**Klaar!**
---
## 📚 Meer Informatie
- [Azure Container Registry RBAC](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-roles)
- [ACR Permissions Best Practices](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-best-practices)
- [ABAC Repository Permissions](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-repository-scoped-permissions)
---
## 🎯 Conclusie
**Kies: RBAC Registry Permissions**
Dit is de beste keuze voor:
- ✅ 20 gebruikers
- ✅ Corporate tool
- ✅ 2 repositories (backend + frontend)
- ✅ Eenvoudige beheer gewenst
- ✅ Alle repositories hebben dezelfde toegang
Je kunt altijd later een nieuwe registry aanmaken met ABAC als je granular control nodig hebt, maar voor jouw situatie is dat niet nodig.

View File

@@ -1,246 +0,0 @@
# Azure Container Registry - Pricing Plan Keuze
## 🎯 Aanbeveling voor Jouw Situatie
**Voor CMDB Insight (20 gebruikers, corporate tool, productie):**
### ✅ **Basic SKU** (Aanbevolen) ⭐
**Waarom:**
- ✅ Voldoende storage (10GB) voor meerdere versies
- ✅ Goedkoop (~€5/maand)
- ✅ Alle features die je nodig hebt
- ✅ Perfect voor kleine tot middelgrote teams
---
## 📊 SKU Vergelijking
### Basic SKU (~€5/maand) ⭐ **AANBEVOLEN**
**Inclusief:**
- ✅ **10GB storage** - Ruim voldoende voor backend + frontend images met meerdere versies
- ✅ **1GB/day webhook throughput** - Voldoende voor CI/CD
- ✅ **Unlimited pulls** - Geen extra kosten voor image pulls
- ✅ **Admin user enabled** - Voor development/productie
- ✅ **RBAC support** - Role-based access control
- ✅ **Content trust** - Image signing support
**Limitaties:**
- ❌ Geen geo-replicatie
- ❌ Geen security scanning (vulnerability scanning)
- ❌ Geen content trust storage
**Wanneer gebruiken:**
- ✅ **Jouw situatie** - 20 gebruikers, corporate tool
- ✅ Development en productie omgevingen
- ✅ Kleine tot middelgrote teams
- ✅ Budget-conscious deployments
**Voorbeeld kosten:**
- 2 images (backend + frontend)
- ~10 versies per image
- ~500MB per image = ~10GB totaal
- **Kosten: ~€5/maand** (alleen storage, geen extra pull kosten)
---
### Standard SKU (~€20/maand)
**Inclusief (alles van Basic +):**
- ✅ **100GB storage** - Voor grote deployments
- ✅ **10GB/day webhook throughput** - Voor hoge CI/CD volumes
- ✅ **Geo-replicatie** - Images repliceren naar meerdere regio's
- ✅ **Content trust storage** - Voor image signing
**Extra features:**
- ✅ **Better performance** - Snellere pulls voor geo-replicated images
- ✅ **Disaster recovery** - Images beschikbaar in meerdere regio's
**Wanneer gebruiken:**
- ✅ Grote deployments (>50GB images)
- ✅ Multi-region deployments nodig
- ✅ Hoge CI/CD volumes (>1GB/day)
- ✅ Disaster recovery requirements
**Voor jouw situatie:** **Niet nodig** - Basic is voldoende
---
### Premium SKU (~€50/maand)
**Inclusief (alles van Standard +):**
- ✅ **500GB storage** - Voor zeer grote deployments
- ✅ **50GB/day webhook throughput** - Voor enterprise CI/CD
- ✅ **Security scanning** - Automatische vulnerability scanning
- ✅ **Advanced security features** - Firewall rules, private endpoints
- ✅ **Dedicated throughput** - Garantie op performance
**Extra features:**
- ✅ **Image vulnerability scanning** - Automatisch scannen op security issues
- ✅ **Private endpoints** - Volledig private connectivity
- ✅ **Firewall rules** - Network-level security
**Wanneer gebruiken:**
- ✅ Enterprise deployments
- ✅ Security compliance requirements (ISO 27001, etc.)
- ✅ Zeer grote deployments (>100GB)
- ✅ Multi-tenant scenarios
**Voor jouw situatie:** **Niet nodig** - Overkill voor 20 gebruikers
---
## 💰 Kosten Breakdown
### Basic SKU (Aanbevolen) ⭐
**Maandelijkse kosten:**
- **Storage**: €0.167 per GB/maand
- **10GB storage**: ~€1.67/maand
- **Base fee**: ~€3-4/maand
- **Totaal**: ~€5/maand
**Voorbeeld voor jouw situatie:**
- Backend image: ~200MB
- Frontend image: ~50MB
- 10 versies per image: ~2.5GB
- **Ruim binnen 10GB limit** ✅
**Jaarlijkse kosten:** ~€60/jaar
---
### Standard SKU
**Maandelijkse kosten:**
- **Storage**: €0.167 per GB/maand (eerste 100GB)
- **100GB storage**: ~€16.70/maand
- **Base fee**: ~€3-4/maand
- **Totaal**: ~€20/maand
**Jaarlijkse kosten:** ~€240/jaar
**Voor jouw situatie:** **Te duur** - Je gebruikt maar ~2.5GB
---
### Premium SKU
**Maandelijkse kosten:**
- **Storage**: €0.167 per GB/maand (eerste 500GB)
- **500GB storage**: ~€83.50/maand
- **Base fee**: ~€16.50/maand
- **Totaal**: ~€50-100/maand (afhankelijk van storage)
**Jaarlijkse kosten:** ~€600-1200/jaar
**Voor jouw situatie:** **Veel te duur** - Niet nodig
---
## 📈 Wanneer Upgrade naar Standard/Premium?
### Upgrade naar Standard als:
- ✅ Je >50GB images hebt
- ✅ Je multi-region deployment nodig hebt
- ✅ Je >1GB/day webhook throughput nodig hebt
- ✅ Je disaster recovery nodig hebt
### Upgrade naar Premium als:
- ✅ Je security scanning nodig hebt (compliance)
- ✅ Je >100GB images hebt
- ✅ Je private endpoints nodig hebt
- ✅ Je enterprise security features nodig hebt
**Voor jouw situatie:** Start met **Basic**, upgrade later als nodig.
---
## 🔄 Upgrade/Downgrade
**Goed nieuws:**
- ✅ Je kunt altijd upgraden (Basic → Standard → Premium)
- ✅ Je kunt downgraden (Premium → Standard → Basic)
- ⚠️ **Let op**: Bij downgrade verlies je mogelijk data als je over de storage limit gaat
**Aanbeveling:**
- Start met **Basic**
- Monitor storage gebruik
- Upgrade alleen als je echt de extra features nodig hebt
---
## 📋 Checklist: Welke SKU?
### Kies Basic als:
- [x] Je <50GB images hebt ✅
- [x] Je <20 gebruikers hebt ✅
- [x] Je geen geo-replicatie nodig hebt ✅
- [x] Je geen security scanning nodig hebt ✅
- [x] Je budget-conscious bent ✅
**→ Jouw situatie: ✅ Kies Basic!**
### Kies Standard als:
- [ ] Je >50GB images hebt
- [ ] Je multi-region deployment nodig hebt
- [ ] Je disaster recovery nodig hebt
- [ ] Je >1GB/day webhook throughput nodig hebt
### Kies Premium als:
- [ ] Je security scanning nodig hebt (compliance)
- [ ] Je >100GB images hebt
- [ ] Je private endpoints nodig hebt
- [ ] Je enterprise security features nodig hebt
---
## 💡 Mijn Aanbeveling
**Voor CMDB Insight:**
### ✅ **Kies Basic SKU** ⭐
**Waarom:**
1. **Voldoende storage** - 10GB is ruim voldoende voor jouw 2 images met meerdere versies
2. **Kosteneffectief** - ~€5/maand vs €20-50/maand
3. **Alle features die je nodig hebt** - RBAC, content trust, unlimited pulls
4. **Eenvoudig** - Geen complexe configuratie nodig
5. **Upgrade mogelijk** - Je kunt altijd later upgraden als nodig
**Geschatte storage gebruik:**
- Backend: ~200MB × 10 versies = ~2GB
- Frontend: ~50MB × 10 versies = ~0.5GB
- **Totaal: ~2.5GB** (ruim binnen 10GB limit)
**Kosten:**
- **Maandelijks**: ~€5
- **Jaarlijks**: ~€60
- **Kosteneffectief** voor jouw use case
---
## 🎯 Conclusie
**Kies: Basic SKU**
Dit is de beste keuze voor:
- ✅ 20 gebruikers
- ✅ Corporate tool
- ✅ Productie omgeving
- ✅ Budget-conscious
- ✅ Eenvoudige setup
Je kunt altijd later upgraden naar Standard of Premium als je:
- Meer storage nodig hebt
- Geo-replicatie nodig hebt
- Security scanning nodig hebt
---
## 📚 Meer Informatie
- [Azure Container Registry Pricing](https://azure.microsoft.com/en-us/pricing/details/container-registry/)
- [ACR SKU Comparison](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-skus)
- [ACR Storage Limits](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-skus#sku-features-and-limits)

View File

@@ -1,317 +0,0 @@
# Shared Azure Container Registry Setup
Guide for using a shared Azure Container Registry across multiple applications.
## 🎯 Why Share ACR?
**Benefits:**
- **Cost Savings**: One ACR for all applications (€5-20/month vs multiple ACRs)
- **Centralized Management**: All images in one place
- **Easier Collaboration**: Teams can share images
- **Better Resource Utilization**: More efficient use of storage
**How it works:**
- ACR is shared, but each application uses a **unique repository name**
- Repository name (`cmdb-insight`) separates your app from others
- Images are organized by application: `acr.azurecr.io/app-name/service:tag`
## 📦 ACR Structure
```
zuyderlandacr.azurecr.io/
├── cmdb-insight/ ← This application
│ ├── backend:latest
│ ├── backend:v1.0.0
│ ├── frontend:latest
│ └── frontend:v1.0.0
├── other-app/ ← Another application
│ ├── api:latest
│ └── web:latest
└── shared-services/ ← Shared base images
├── nginx:latest
└── node:20-alpine
```
## 🔧 Setup Options
### Option 1: Use Existing ACR (Recommended)
If you already have an ACR for other applications:
```bash
# Set your existing ACR details
ACR_NAME="your-existing-acr"
ACR_RESOURCE_GROUP="rg-shared-services" # Or wherever your ACR is
# Verify it exists
az acr show --name $ACR_NAME --resource-group $ACR_RESOURCE_GROUP
# Get login server
ACR_LOGIN_SERVER=$(az acr show --name $ACR_NAME --resource-group $ACR_RESOURCE_GROUP --query loginServer --output tsv)
echo "ACR Login Server: $ACR_LOGIN_SERVER"
```
**That's it!** Your images will be stored as:
- `your-existing-acr.azurecr.io/cmdb-insight/backend:latest`
- `your-existing-acr.azurecr.io/cmdb-insight/frontend:latest`
### Option 2: Create New Shared ACR
If you don't have an ACR yet, create one that can be shared:
```bash
# Set variables
ACR_NAME="zuyderlandacr" # Recommended: company name + "acr"
ACR_RESOURCE_GROUP="rg-shared-services" # Shared resource group
LOCATION="westeurope"
SKU="Standard" # Recommended for shared ACR
# Create resource group for shared services
az group create --name $ACR_RESOURCE_GROUP --location $LOCATION
# Create ACR
az acr create \
--resource-group $ACR_RESOURCE_GROUP \
--name $ACR_NAME \
--sku $SKU \
--admin-enabled true
# Verify
az acr show --name $ACR_NAME --resource-group $ACR_RESOURCE_GROUP
```
## 🚀 Using Shared ACR
### Build and Push Images
```bash
# Set ACR name
export ACR_NAME="zuyderlandacr"
export REPO_NAME="cmdb-insight" # This is your app identifier
# Build and push (repository name separates your app)
./scripts/build-and-push-azure.sh
# Images will be:
# - zuyderlandacr.azurecr.io/cmdb-insight/backend:latest
# - zuyderlandacr.azurecr.io/cmdb-insight/frontend:latest
```
### Configure App Services
```bash
# Backend App Service
az webapp config container set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod \
--docker-custom-image-name "zuyderlandacr.azurecr.io/cmdb-insight/backend:latest" \
--docker-registry-server-url "https://zuyderlandacr.azurecr.io"
# Frontend App Service
az webapp config container set \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-insight-prod \
--docker-custom-image-name "zuyderlandacr.azurecr.io/cmdb-insight/frontend:latest" \
--docker-registry-server-url "https://zuyderlandacr.azurecr.io"
```
### Update Pipeline Variables
In `azure-pipelines.yml`:
```yaml
variables:
acrName: 'yourcompanyacr' # Shared ACR name
repositoryName: 'cmdb-insight' # Your app repository name
# ... other variables
```
## 🔐 Permissions
### Grant App Services Access to Shared ACR
```bash
# Get App Service Managed Identity
BACKEND_PRINCIPAL_ID=$(az webapp identity show \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod \
--query principalId --output tsv)
# Get ACR Resource ID (from shared resource group)
ACR_ID=$(az acr show \
--name zuyderlandacr \
--resource-group rg-shared-services \
--query id --output tsv)
# Grant AcrPull permission
az role assignment create \
--assignee $BACKEND_PRINCIPAL_ID \
--role AcrPull \
--scope $ACR_ID
```
## 📊 Managing Multiple Applications
### List All Repositories
```bash
# See all applications in ACR
az acr repository list --name zuyderlandacr
# Output:
# cmdb-insight
# other-app
# shared-services
```
### List Images for This App
```bash
# Backend images
az acr repository show-tags \
--name zuyderlandacr \
--repository cmdb-insight/backend
# Frontend images
az acr repository show-tags \
--name zuyderlandacr \
--repository cmdb-insight/frontend
```
### Clean Up Old Images
```bash
# Delete old tags (keep last 10)
az acr repository show-tags \
--name zuyderlandacr \
--repository cmdb-insight/backend \
--orderby time_desc \
--query '[10:].name' \
--output tsv | \
xargs -I {} az acr repository delete \
--name zuyderlandacr \
--image cmdb-insight/backend:{} \
--yes
```
## 💰 Cost Optimization
### Shared ACR Costs
| SKU | Storage | Cost | Best For |
|-----|---------|------|----------|
| Basic | 10GB | €5/month | Small teams, few apps |
| Standard | 100GB | €20/month | **Recommended for shared ACR** |
| Premium | 500GB | €50/month | Large organizations |
**Recommendation**: Use **Standard** SKU for shared ACR:
- Enough storage for multiple applications
- Geo-replication available
- Good balance of cost and features
### Cost Savings Example
**Without sharing:**
- App 1 ACR: €20/month
- App 2 ACR: €20/month
- App 3 ACR: €20/month
- **Total: €60/month**
**With shared ACR:**
- Shared ACR (Standard): €20/month
- **Total: €20/month**
- **Savings: €40/month (67%)**
## 🎯 Best Practices
### 1. Naming Convention
Use consistent repository naming:
- `app-name/service:tag` (e.g., `cmdb-insight/backend:latest`)
- Avoid generic names like `backend`, `frontend`
- Include app identifier in repository name
### 2. Resource Group Organization
**Option A: Separate Resource Groups**
```
rg-shared-services/
└── ACR (shared)
rg-cmdb-insight-prod/
└── App-specific resources
```
**Option B: Single Resource Group**
```
rg-production/
├── ACR
├── App 1 resources
├── App 2 resources
└── App 3 resources
```
### 3. Access Control
- Use **Managed Identity** for App Services (recommended)
- Grant **AcrPull** role (not AcrPush) to App Services
- Use **Service Principals** for CI/CD pipelines
- Consider **Azure RBAC** for fine-grained access
### 4. Image Tagging Strategy
```bash
# Use semantic versioning
cmdb-insight/backend:v1.0.0
cmdb-insight/backend:v1.0.1
cmdb-insight/backend:latest
# Use build IDs for CI/CD
cmdb-insight/backend:12345
cmdb-insight/backend:latest
```
## 🔄 Migration from Dedicated ACR
If you have a dedicated ACR and want to migrate to shared:
```bash
# 1. Tag images with new repository name
docker tag oldacr.azurecr.io/backend:latest newacr.azurecr.io/cmdb-insight/backend:latest
docker tag oldacr.azurecr.io/frontend:latest newacr.azurecr.io/cmdb-insight/frontend:latest
# 2. Push to shared ACR
docker push newacr.azurecr.io/cmdb-insight/backend:latest
docker push newacr.azurecr.io/cmdb-insight/frontend:latest
# 3. Update App Services
az webapp config container set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod \
--docker-custom-image-name "newacr.azurecr.io/cmdb-insight/backend:latest"
# 4. Update pipeline variables
# 5. Test deployment
# 6. Delete old ACR (after verification)
```
## 📚 Related Documentation
- **`AZURE-NEW-SUBSCRIPTION-SETUP.md`** - Complete Azure setup guide
- **`AZURE-CONTAINER-REGISTRY.md`** - ACR setup and usage
- **`AZURE-PIPELINE-DEPLOYMENT.md`** - Automated deployment
## ✅ Checklist
- [ ] Decide: Use existing ACR or create new shared ACR
- [ ] Verify ACR exists or create new one
- [ ] Update pipeline variables with ACR name
- [ ] Grant App Services access to ACR
- [ ] Build and push images with repository name `cmdb-insight`
- [ ] Configure App Services to use shared ACR
- [ ] Test deployment
- [ ] Document ACR name for team
---
**💡 Remember**: The repository name (`cmdb-insight`) is what separates your application from others in the shared ACR!

View File

@@ -1,451 +0,0 @@
# Azure Container Registry - Docker Images Build & Push Guide
Deze guide beschrijft hoe je Docker images bouwt en naar Azure Container Registry (ACR) pusht voor de CMDB Insight applicatie.
## 📋 Inhoudsopgave
1. [Azure Container Registry Setup](#azure-container-registry-setup)
2. [Lokale Build & Push](#lokale-build--push)
3. [Azure DevOps Pipeline](#azure-devops-pipeline)
4. [Docker Compose Configuration](#docker-compose-configuration)
5. [Best Practices](#best-practices)
---
## 🔧 Azure Container Registry Setup
### 1. Azure Container Registry Aanmaken
Als je nog geen ACR hebt, maak er een aan via Azure Portal of Azure CLI:
```bash
# Resource group (als nog niet bestaat)
az group create --name rg-cmdb-gui --location westeurope
# Azure Container Registry aanmaken
az acr create \
--resource-group rg-cmdb-gui \
--name zuyderlandcmdbacr \
--sku Basic \
--admin-enabled true
```
**ACR SKU Opties:**
- **Basic**: Geschikt voor development/test (~€5/maand)
- **Standard**: Voor productie met geo-replicatie (~€20/maand)
- **Premium**: Voor enterprise met security features (~€50/maand)
### 2. Registry URL
Na aanmaken is je registry beschikbaar op:
```
<acr-name>.azurecr.io
```
Bijvoorbeeld: `zuyderlandcmdbacr.azurecr.io`
### 3. Authentication
ACR ondersteunt meerdere authenticatiemethoden:
**A) Admin Credentials (Eenvoudig, voor development)**
```bash
# Admin credentials ophalen
az acr credential show --name zuyderlandcmdbacr
# Login met Docker
az acr login --name zuyderlandcmdbacr
# OF
docker login zuyderlandcmdbacr.azurecr.io -u <admin-username> -p <admin-password>
```
**B) Azure Service Principal (Aanbevolen voor CI/CD)**
```bash
# Service Principal aanmaken
az ad sp create-for-rbac --name "zuyderland-cmdb-acr-sp" --role acrpull --scopes /subscriptions/<subscription-id>/resourceGroups/rg-cmdb-gui/providers/Microsoft.ContainerRegistry/registries/zuyderlandcmdbacr
# Gebruik de output credentials in CI/CD
```
**C) Managed Identity (Best voor Azure services)**
- Gebruik Managed Identity voor Azure DevOps, App Service, etc.
- Configureer via Azure Portal → ACR → Access Control (IAM)
---
## 🐳 Lokale Build & Push
### Optie 1: Met Script (Aanbevolen)
Gebruik het `build-and-push-azure.sh` script:
```bash
# Maak script uitvoerbaar
chmod +x scripts/build-and-push-azure.sh
# Build en push (gebruikt 'latest' als versie)
./scripts/build-and-push-azure.sh
# Build en push met specifieke versie
./scripts/build-and-push-azure.sh 1.0.0
```
**Environment Variables:**
```bash
export ACR_NAME="zuyderlandcmdbacr"
export REPO_NAME="cmdb-insight"
./scripts/build-and-push-azure.sh 1.0.0
```
### Optie 2: Handmatig met Docker Commands
```bash
# Login
az acr login --name zuyderlandcmdbacr
# Set variabelen
ACR_NAME="zuyderlandcmdbacr"
REGISTRY="${ACR_NAME}.azurecr.io"
REPO_NAME="cmdb-insight"
VERSION="1.0.0"
# Build backend
docker build -t ${REGISTRY}/${REPO_NAME}/backend:${VERSION} \
-t ${REGISTRY}/${REPO_NAME}/backend:latest \
-f backend/Dockerfile.prod ./backend
# Build frontend
docker build -t ${REGISTRY}/${REPO_NAME}/frontend:${VERSION} \
-t ${REGISTRY}/${REPO_NAME}/frontend:latest \
-f frontend/Dockerfile.prod ./frontend
# Push images
docker push ${REGISTRY}/${REPO_NAME}/backend:${VERSION}
docker push ${REGISTRY}/${REPO_NAME}/backend:latest
docker push ${REGISTRY}/${REPO_NAME}/frontend:${VERSION}
docker push ${REGISTRY}/${REPO_NAME}/frontend:latest
```
---
## 🚀 Azure DevOps Pipeline
### 1. Service Connection Aanmaken
In Azure DevOps:
1. **Project Settings** → **Service connections** → **New service connection**
2. Kies **Docker Registry**
3. Kies **Azure Container Registry**
4. Selecteer je Azure subscription en ACR
5. Geef een naam: `zuyderland-cmdb-acr-connection`
### 2. Pipeline Configuratie
Het project bevat al een `azure-pipelines.yml` bestand. Configureer deze in Azure DevOps:
1. **Pipelines** → **New pipeline**
2. Kies je repository (Azure Repos)
3. Kies **Existing Azure Pipelines YAML file**
4. Selecteer `azure-pipelines.yml`
5. Review en run
### 3. Pipeline Variabelen Aanpassen
Pas de variabelen in `azure-pipelines.yml` aan naar jouw instellingen:
```yaml
variables:
acrName: 'zuyderlandcmdbacr' # Jouw ACR naam
repositoryName: 'cmdb-insight'
dockerRegistryServiceConnection: 'zuyderland-cmdb-acr-connection'
```
### 4. Automatische Triggers
De pipeline triggert automatisch bij:
- Push naar `main` branch
- Tags die beginnen met `v*` (bijv. `v1.0.0`)
**Handmatig Triggeren:**
```bash
# Tag aanmaken en pushen
git tag v1.0.0
git push origin v1.0.0
```
---
## 📦 Docker Compose Configuration
### Productie Docker Compose met ACR
Maak `docker-compose.prod.acr.yml`:
```yaml
version: '3.8'
services:
backend:
image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:latest
environment:
- NODE_ENV=production
- PORT=3001
env_file:
- .env.production
volumes:
- backend_data:/app/data
restart: unless-stopped
networks:
- internal
healthcheck:
test: ["CMD", "node", "-e", "require('http').get('http://localhost:3001/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
frontend:
image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/frontend:latest
depends_on:
- backend
restart: unless-stopped
networks:
- internal
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
nginx:
image: nginx:alpine
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/ssl:/etc/nginx/ssl:ro
- nginx_cache:/var/cache/nginx
depends_on:
- frontend
- backend
restart: unless-stopped
networks:
- internal
volumes:
backend_data:
nginx_cache:
networks:
internal:
driver: bridge
```
### Gebruik Specifieke Versies
Voor productie deployments, gebruik specifieke versies:
```yaml
backend:
image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:v1.0.0
frontend:
image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/frontend:v1.0.0
```
### Pull en Deploy
```bash
# Login (als nodig)
az acr login --name zuyderlandcmdbacr
# Pull images
docker-compose -f docker-compose.prod.acr.yml pull
# Deploy
docker-compose -f docker-compose.prod.acr.yml up -d
# Status checken
docker-compose -f docker-compose.prod.acr.yml ps
# Logs bekijken
docker-compose -f docker-compose.prod.acr.yml logs -f
```
---
## 🎯 Best Practices
### 1. Versioning
- **Gebruik semantic versioning**: `v1.0.0`, `v1.0.1`, etc.
- **Tag altijd als `latest`**: Voor development/CI/CD
- **Productie**: Gebruik specifieke versies, nooit `latest`
```bash
# Tag met versie
git tag v1.0.0
git push origin v1.0.0
# Build met versie
./scripts/build-and-push-azure.sh 1.0.0
```
### 2. Security
- **Admin credentials uitschakelen** in productie (gebruik Service Principal)
- **Enable Content Trust** voor image signing (optioneel)
- **Scan images** voor vulnerabilities (Azure Security Center)
```bash
# Admin uitschakelen
az acr update --name zuyderlandcmdbacr --admin-enabled false
```
### 3. Image Cleanup
ACR heeft een retention policy voor oude images:
```bash
# Retention policy instellen (bijv. laatste 10 tags behouden)
az acr repository show-tags --name zuyderlandcmdbacr --repository cmdb-insight/backend --orderby time_desc --top 10
# Oude tags verwijderen (handmatig of via policy)
az acr repository delete --name zuyderlandcmdbacr --image cmdb-insight/backend:old-tag
```
### 4. Multi-Stage Builds
De `Dockerfile.prod` bestanden gebruiken al multi-stage builds voor kleinere images.
### 5. Build Cache
Voor snellere builds, gebruik build cache:
```bash
# Build met cache
docker build --cache-from zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:latest \
-t zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:new-tag \
-f backend/Dockerfile.prod ./backend
```
---
## 🔍 Troubleshooting
### Authentication Issues
```bash
# Check Azure login
az account show
# Re-login
az login
az acr login --name zuyderlandcmdbacr
# Check Docker login
cat ~/.docker/config.json
```
### Build Errors
```bash
# Build met verbose output
docker build --progress=plain -t test-image -f backend/Dockerfile.prod ./backend
# Check lokale images
docker images | grep cmdb-insight
```
### Push Errors
```bash
# Check ACR connectivity
az acr check-health --name zuyderlandcmdbacr
# Check repository exists
az acr repository list --name zuyderlandcmdbacr
# View repository tags
az acr repository show-tags --name zuyderlandcmdbacr --repository cmdb-insight/backend
```
### Azure DevOps Pipeline Errors
- Check **Service Connection** permissions
- Verify **ACR naam** in pipeline variables
- Check **Dockerfile paths** zijn correct
- Review pipeline logs in Azure DevOps
---
## 📝 Usage Examples
### Eenvoudige Workflow
```bash
# 1. Code aanpassen en committen
git add .
git commit -m "Update feature"
git push origin main
# 2. Build en push naar ACR
./scripts/build-and-push-azure.sh
# 3. Deploy (op productie server)
az acr login --name zuyderlandcmdbacr
docker-compose -f docker-compose.prod.acr.yml pull
docker-compose -f docker-compose.prod.acr.yml up -d
```
### Versioned Release
```bash
# 1. Tag release
git tag v1.0.0
git push origin v1.0.0
# 2. Build en push met versie
./scripts/build-and-push-azure.sh 1.0.0
# 3. Update docker-compose met versie
# Edit docker-compose.prod.acr.yml: image: ...backend:v1.0.0
# 4. Deploy
docker-compose -f docker-compose.prod.acr.yml pull
docker-compose -f docker-compose.prod.acr.yml up -d
```
### Azure DevOps Automated
1. Push code naar `main` → Pipeline triggert automatisch
2. Pipeline bouwt images en pusht naar ACR
3. Deploy handmatig of via release pipeline
---
## 📚 Additional Resources
- [Azure Container Registry Documentation](https://docs.microsoft.com/en-us/azure/container-registry/)
- [Azure DevOps Docker Task](https://docs.microsoft.com/en-us/azure/devops/pipelines/tasks/build/docker)
- [ACR Best Practices](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-best-practices)
- [Docker Compose Production Guide](./PRODUCTION-DEPLOYMENT.md)
---
## 🔄 Vergelijking: Gitea vs Azure Container Registry
| Feature | Gitea Registry | Azure Container Registry |
|---------|---------------|-------------------------|
| **Kosten** | Gratis (met Gitea) | €5-50/maand (afhankelijk van SKU) |
| **Security** | Basic | Enterprise-grade (RBAC, scanning) |
| **CI/CD** | Gitea Actions | Azure DevOps, GitHub Actions |
| **Geo-replicatie** | Nee | Ja (Standard/Premium) |
| **Image Scanning** | Nee | Ja (Azure Security Center) |
| **Integratie** | Gitea ecosystem | Azure ecosystem (App Service, AKS, etc.) |
**Aanbeveling:**
- **Development/Test**: Gitea Registry (gratis, eenvoudig)
- **Productie**: Azure Container Registry (security, enterprise features)

View File

@@ -1,272 +0,0 @@
# Azure Deployment - Infrastructure Samenvatting
## Applicatie Overzicht
**CMDB Insight** - Web applicatie voor classificatie en beheer van applicatiecomponenten in Jira Assets.
### Technologie Stack
- **Backend**: Node.js 20 (Express, TypeScript)
- **Frontend**: React 18 (Vite, TypeScript)
- **Database**: SQLite (cache layer, ~20MB, geen backup nodig - sync vanuit Jira)
- **Containerization**: Docker
- **Authentication**: Jira OAuth 2.0 of Personal Access Token
- **Gebruikers**: Max. 20 collega's
---
## Infrastructure Vereisten
### 1. Compute Resources
**Aanbevolen: Azure App Service (Basic Tier)**
- **App Service Plan**: B1 (1 vCPU, 1.75GB RAM) - **voldoende voor 20 gebruikers**
- 2 Web Apps: Backend + Frontend (deel dezelfde App Service Plan)
- **Kosten**: ~€15-25/maand
- **Voordelen**: Eenvoudig, managed service, voldoende voor kleine teams
**Alternatief: Azure Container Instances (ACI) - Als je containers prefereert**
- 2 containers: Backend + Frontend
- Backend: 1 vCPU, 2GB RAM
- Frontend: 0.5 vCPU, 1GB RAM
- **Kosten**: ~€30-50/maand
- **Nadeel**: Minder managed features dan App Service
### 2. Database & Storage
**Optie A: PostgreSQL (Aanbevolen) ⭐**
- **Azure Database for PostgreSQL**: Flexible Server Basic tier (B1ms)
- **Database**: ~20MB (huidige grootte, ruimte voor groei)
- **Kosten**: ~€20-30/maand
- **Voordelen**: Identieke dev/prod stack, betere concurrency, connection pooling
**Optie B: SQLite (Huidige situatie)**
- **SQLite Database**: ~20MB (in Azure Storage)
- **Azure Storage Account**: Standard LRS (Hot tier)
- **Kosten**: ~€1-3/maand
- **Nadelen**: Beperkte concurrency, geen connection pooling
**Logs**: ~500MB-1GB/maand (Application Insights)
### 3. Networking
**Vereisten:**
- **HTTPS**: SSL/TLS certificaat (Let's Encrypt of Azure App Service Certificate)
- **DNS**: Subdomain (bijv. `cmdb.zuyderland.nl`)
- **Firewall**: Inbound poorten 80/443, outbound naar Jira API
- **Load Balancer**: Azure Application Gateway (optioneel, voor HA)
**Network Security:**
- Private endpoints (optioneel, voor extra security)
- Network Security Groups (NSG)
- Azure Firewall (optioneel)
### 4. Secrets Management
**Azure Key Vault** voor:
- `JIRA_OAUTH_CLIENT_SECRET`
- `SESSION_SECRET`
- `ANTHROPIC_API_KEY`
- `JIRA_PAT` (indien gebruikt)
**Kosten**: ~€1-5/maand
### 5. Monitoring & Logging
**Azure Monitor:**
- Application Insights (Basic tier - gratis tot 5GB/maand)
- Log Analytics Workspace (Pay-as-you-go)
- Alerts voor health checks, errors
**Kosten**: ~€0-20/maand (met Basic tier vaak gratis voor kleine apps)
### 6. Backup & Disaster Recovery
**Geen backup vereist** - Data wordt gesynchroniseerd vanuit Jira Assets, dus backup is niet nodig.
De SQLite database is een cache layer die opnieuw opgebouwd kan worden via sync.
---
## Deployment Architectuur
### Aanbevolen: Azure App Service (Basic Tier)
**Eenvoudige setup voor kleine teams (20 gebruikers):**
```
┌─────────────────────────────────────┐
│ Azure App Service (B1 Plan) │
│ │
│ ┌──────────┐ ┌──────────┐ │
│ │ Frontend │ │ Backend │ │
│ │ Web App │ │ Web App │ │
│ └──────────┘ └────┬─────┘ │
└─────────────────────────┼──────────┘
┌─────────────┴─────────────┐
│ │
┌───────▼──────┐ ┌────────────▼────┐
│ Azure Storage│ │ Azure Key Vault │
│ (SQLite DB) │ │ (Secrets) │
└──────────────┘ └─────────────────┘
┌───────▼──────┐
│ Application │
│ Insights │
│ (Basic/FREE) │
└──────────────┘
```
**Opmerking**: Application Gateway is niet nodig voor 20 gebruikers - App Service heeft ingebouwde SSL en load balancing.
---
## Security Overwegingen
### 1. Authentication
- **Jira OAuth 2.0**: Gebruikers authenticeren via Jira
- **Session Management**: Sessions in-memory (overweeg Azure Redis Cache voor productie)
### 2. Network Security
- **HTTPS Only**: Alle verkeer via HTTPS
- **CORS**: Alleen toegestaan vanuit geconfigureerde frontend URL
- **Rate Limiting**: 100 requests/minuut per IP (configureerbaar)
### 3. Data Security
- **Secrets**: Alle secrets in Azure Key Vault
- **Database**: SQLite database in Azure Storage (encrypted at rest)
- **In Transit**: TLS 1.2+ voor alle communicatie
### 4. Compliance
- **Logging**: Alle API calls gelogd (geen PII)
- **Audit Trail**: Wijzigingen aan applicaties gelogd
- **Data Residency**: Data blijft in Azure West Europe (of gewenste regio)
---
## Externe Dependencies
### 1. Jira Assets API
- **Endpoint**: `https://jira.zuyderland.nl`
- **Authentication**: OAuth 2.0 of Personal Access Token
- **Rate Limits**: Respecteer Jira API rate limits
- **Network**: Outbound HTTPS naar Jira (poort 443)
### 2. AI API (Optioneel)
- **Anthropic Claude API**: Voor AI classificatie features
- **Network**: Outbound HTTPS naar `api.anthropic.com`
---
## Deployment Stappen
### 1. Azure Resources Aanmaken
```bash
# Resource Group
az group create --name rg-cmdb-gui --location westeurope
# App Service Plan (Basic B1 - voldoende voor 20 gebruikers)
az appservice plan create --name plan-cmdb-gui --resource-group rg-cmdb-gui --sku B1
# Web Apps (delen dezelfde plan - kostenbesparend)
az webapp create --name cmdb-backend --resource-group rg-cmdb-gui --plan plan-cmdb-gui
az webapp create --name cmdb-frontend --resource-group rg-cmdb-gui --plan plan-cmdb-gui
# Key Vault
az keyvault create --name kv-cmdb-gui --resource-group rg-cmdb-gui --location westeurope
# Storage Account (voor SQLite database - alleen bij SQLite optie)
az storage account create --name stcmdbgui --resource-group rg-cmdb-gui --location westeurope --sku Standard_LRS
```
**Met PostgreSQL (Aanbevolen):**
```bash
# PostgreSQL Database (Flexible Server)
az postgres flexible-server create \
--resource-group rg-cmdb-gui \
--name psql-cmdb-gui \
--location westeurope \
--admin-user cmdbadmin \
--admin-password <secure-password-from-key-vault> \
--sku-name Standard_B1ms \
--tier Burstable \
--storage-size 32 \
--version 15
# Database aanmaken
az postgres flexible-server db create \
--resource-group rg-cmdb-gui \
--server-name psql-cmdb-gui \
--database-name cmdb
```
### 2. Configuration
- Environment variabelen via App Service Configuration
- Secrets via Key Vault references
- SSL certificaat via App Service Certificate of Let's Encrypt
### 3. CI/CD
- **Azure DevOps Pipelines** of **GitHub Actions**
- Automatische deployment bij push naar main branch
- Deployment slots voor zero-downtime updates
---
## Kosten Schatting (Maandelijks)
**Voor 20 gebruikers - Basic Setup:**
**Met SQLite (huidige setup):**
| Component | Schatting |
|-----------|-----------|
| App Service Plan (B1) | €15-25 |
| Storage Account | €1-3 |
| Key Vault | €1-2 |
| Application Insights (Basic) | €0-5 |
| **Totaal** | **€17-35/maand** |
**Met PostgreSQL (aanbevolen):**
| Component | Schatting |
|-----------|-----------|
| App Service Plan (B1) | €15-25 |
| PostgreSQL Database (B1ms) | €20-30 |
| Key Vault | €1-2 |
| Application Insights (Basic) | €0-5 |
| **Totaal** | **€36-62/maand** |
*Inclusief: SSL certificaat (gratis via App Service), basis monitoring*
**Opmerking**: Met Basic tier en gratis Application Insights kan dit zelfs onder €20/maand blijven.
**Backup**: Niet nodig - data wordt gesynchroniseerd vanuit Jira Assets.
---
## Vragen voor Infrastructure Team
1. **DNS & Domain**: Kunnen we een subdomain krijgen? (bijv. `cmdb.zuyderland.nl`)
2. **SSL Certificaat**: Azure App Service Certificate of Let's Encrypt via certbot?
3. **Network**: Moeten we via VPN/ExpressRoute of direct internet toegang?
4. **Firewall Rules**: Welke outbound toegang is nodig? (Jira API, Anthropic API)
5. **Monitoring**: Gebruiken we bestaande Azure Monitor setup of aparte workspace?
6. **Backup**: Niet nodig - SQLite database is cache layer, data wordt gesynchroniseerd vanuit Jira Assets
7. **Disaster Recovery**: Data kan opnieuw gesynchroniseerd worden vanuit Jira (geen backup vereist)
8. **Compliance**: Zijn er specifieke compliance requirements? (ISO 27001, NEN 7510)
9. **Scaling**: Niet nodig - max. 20 gebruikers, Basic tier is voldoende
10. **Maintenance Windows**: Wanneer kunnen we updates deployen?
---
## Next Steps
1. **Kick-off Meeting**: Bespreken architectuur en requirements
2. **Proof of Concept**: Deploy naar Azure App Service (test environment)
3. **Security Review**: Security team review van configuratie
4. **Load Testing**: Testen onder verwachte load
5. **Production Deployment**: Go-live met monitoring
---
## Contact & Documentatie
- **Application Code**: [Git Repository]
- **Deployment Guide**: `PRODUCTION-DEPLOYMENT.md`
- **API Documentation**: `/api/config` endpoint

View File

@@ -1,337 +0,0 @@
# Azure Pipeline Automated Deployment Guide
Complete guide for setting up automated deployment from Azure DevOps Pipeline to Azure App Services.
## 📋 Overview
The enhanced `azure-pipelines.yml` now includes:
- **Build Stage**: Builds and pushes Docker images to ACR
- **Deploy Stage**: Automatically deploys to Azure App Services
- **Verification**: Health checks after deployment
## 🚀 Quick Setup
### Step 1: Configure Pipeline Variables
Update the variables in `azure-pipelines.yml`:
```yaml
variables:
# Azure Container Registry
acrName: 'zdlas' # Your ACR name
repositoryName: 'cmdb-insight'
dockerRegistryServiceConnection: 'zuyderland-cmdb-acr-connection'
# Azure App Service
resourceGroup: 'rg-cmdb-insight-prod' # Your resource group
backendAppName: 'cmdb-backend-prod' # Your backend app name
frontendAppName: 'cmdb-frontend-prod' # Your frontend app name
azureSubscription: 'zuyderland-cmdb-subscription' # Azure service connection
# Deployment
deployToProduction: true # Set false to skip deployment
useDeploymentSlots: false # Set true for zero-downtime deployment
```
### Step 2: Create Azure Service Connection
You need an Azure service connection for App Service deployment:
1. **Go to Azure DevOps** → Your Project
2. **Project Settings** → **Service connections** → **New service connection**
3. Choose **Azure Resource Manager**
4. Select:
- **Authentication method**: Managed identity (recommended) or Service principal
- **Azure subscription**: Your subscription
- **Resource group**: Your resource group (optional)
5. **Service connection name**: `zuyderland-cmdb-subscription` (match the variable name)
6. Click **Save**
### Step 3: Configure Environment
The pipeline uses an `environment` called `production`:
1. **Go to Pipelines** → **Environments**
2. Click **Create environment**
3. Name: `production`
4. Add **Approvals and checks** (optional):
- **Approvals**: Require manual approval before deployment
- **Gate**: Health checks before deployment
### Step 4: Run Pipeline
The pipeline will automatically:
1. Build Docker images
2. Push to ACR
3. Deploy to App Services
4. Verify deployment
**Trigger automatically on:**
- Push to `main` branch
- Git tags starting with `v*`
## 🔧 Configuration Options
### Enable/Disable Deployment
To skip deployment (only build images):
```yaml
variables:
deployToProduction: false
```
Or use pipeline variables in Azure DevOps:
1. Go to **Pipelines** → Your pipeline → **Edit**
2. Click **Variables**
3. Add variable: `deployToProduction` = `false`
### Use Specific Image Tag
By default, the pipeline deploys the `latest` tag. To deploy a specific version:
```yaml
# In the Deploy stage, change:
containers: '$(acrName).azurecr.io/$(repositoryName)/backend:$(imageTag)'
```
This will deploy the specific build ID instead of `latest`.
## 🎯 Zero-Downtime Deployment (Deployment Slots)
For production deployments without downtime, use deployment slots:
### Step 1: Create Deployment Slots
```bash
# Create staging slot for backend
az webapp deployment slot create \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod \
--slot staging
# Create staging slot for frontend
az webapp deployment slot create \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-insight-prod \
--slot staging
```
### Step 2: Update Pipeline for Slots
Create `azure-pipelines-slots.yml` (see advanced example below) or modify the existing pipeline:
```yaml
- task: AzureWebAppContainer@1
displayName: 'Deploy to Staging Slot'
inputs:
azureSubscription: '$(azureSubscription)'
appName: '$(backendAppName)'
deployToSlotOrASE: true
resourceGroupName: '$(resourceGroup)'
slotName: 'staging'
containers: '$(acrName).azurecr.io/$(repositoryName)/backend:latest'
- task: AzureCLI@2
displayName: 'Swap Staging to Production'
inputs:
azureSubscription: '$(azureSubscription)'
scriptType: 'bash'
scriptLocation: 'inlineScript'
inlineScript: |
az webapp deployment slot swap \
--name $(backendAppName) \
--resource-group $(resourceGroup) \
--slot staging \
--target-slot production
```
## 📊 Pipeline Stages
### Stage 1: Build
**What it does:**
- Builds backend Docker image
- Builds frontend Docker image
- Pushes both to ACR with tags: `$(Build.BuildId)` and `latest`
**Output:**
- `backendImage`: Full image URL for backend
- `frontendImage`: Full image URL for frontend
### Stage 2: Deploy
**What it does:**
- Deploys backend container to App Service
- Deploys frontend container to App Service
- Restarts both App Services
- Verifies deployment with health checks
**Conditions:**
- Only runs if `deployToProduction = true`
- Only runs if Build stage succeeded
### Stage 3: Verify
**What it does:**
- Checks backend health endpoint (`/api/health`)
- Checks frontend accessibility
- Reports status
## 🔐 Permissions Required
The Azure service connection needs:
1. **App Service Contributor** role on:
- Backend App Service
- Frontend App Service
- App Service Plan
2. **ACR Pull** permissions (if using Managed Identity):
- Already configured via Managed Identity on App Services
### Grant Permissions
```bash
# Get service principal ID from Azure DevOps service connection
# Then grant permissions:
az role assignment create \
--assignee <service-principal-id> \
--role "Website Contributor" \
--scope /subscriptions/<subscription-id>/resourceGroups/rg-cmdb-insight-prod
```
## 🛠️ Troubleshooting
### Deployment Fails: "Service connection not found"
**Solution:**
- Verify service connection name matches `azureSubscription` variable
- Check service connection exists in Project Settings → Service connections
- Verify service connection has correct permissions
### Deployment Fails: "App Service not found"
**Solution:**
- Verify `backendAppName` and `frontendAppName` variables are correct
- Check `resourceGroup` variable matches your resource group
- Verify App Services exist in Azure
### Images Not Updating
**Solution:**
- Check if images were pushed to ACR successfully
- Verify App Service is pulling from correct ACR
- Check container settings in App Service configuration
- Ensure Managed Identity has ACR pull permissions
### Health Check Fails
**Solution:**
- Wait longer (apps may need time to start)
- Check App Service logs: `az webapp log tail`
- Verify health endpoint exists: `/api/health`
- Check environment variables are configured correctly
## 📝 Manual Deployment (Alternative)
If you prefer manual deployment after pipeline builds:
```bash
# After pipeline builds images, manually deploy:
# Restart backend to pull latest image
az webapp restart \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod
# Restart frontend to pull latest image
az webapp restart \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-insight-prod
```
## 🎯 Best Practices
### 1. Use Deployment Slots for Production
- Deploy to staging slot first
- Test in staging
- Swap to production when ready
### 2. Use Specific Tags for Production
Instead of `latest`, use version tags:
```yaml
containers: '$(acrName).azurecr.io/$(repositoryName)/backend:v1.0.0'
```
### 3. Add Approvals for Production
Configure environment approvals:
- Go to **Pipelines** → **Environments** → **production**
- Add **Approvals** check
- Require manual approval before deployment
### 4. Monitor Deployments
- Set up alerts in Application Insights
- Monitor pipeline runs
- Check deployment logs regularly
### 5. Rollback Strategy
If deployment fails:
```bash
# Rollback to previous image
az webapp config container set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-insight-prod \
--docker-custom-image-name <previous-image-tag>
```
## 🔄 Workflow Example
### Typical Development Workflow
1. **Developer pushes code** to `main` branch
2. **Pipeline triggers automatically**
3. **Build stage**: Builds and pushes images
4. **Deploy stage**: Deploys to App Services
5. **Verify stage**: Checks health
6. **Application updated** - ready to use!
### Release Workflow
1. **Create release tag**: `git tag v1.0.0 && git push origin v1.0.0`
2. **Pipeline triggers** with tag
3. **Build stage**: Builds versioned images (`v1.0.0`)
4. **Deploy stage**: Deploys to staging slot
5. **Manual approval** (if configured)
6. **Swap to production**: Zero-downtime deployment
7. **Verify**: Health checks confirm success
## 📚 Related Documentation
- **`AZURE-NEW-SUBSCRIPTION-SETUP.md`** - Initial Azure setup
- **`AZURE-APP-SERVICE-DEPLOYMENT.md`** - Manual deployment guide
- **`AZURE-CONTAINER-REGISTRY.md`** - ACR setup
- **`AZURE-DEVOPS-SETUP.md`** - Pipeline setup basics
## ✅ Checklist
- [ ] Azure service connection created
- [ ] Pipeline variables configured
- [ ] Environment `production` created
- [ ] App Services exist in Azure
- [ ] Permissions configured
- [ ] Pipeline tested successfully
- [ ] Deployment verified
- [ ] Health checks passing
---
**🎉 Your automated deployment pipeline is ready!**
Every push to `main` will now automatically build and deploy your application.

View File

@@ -1,165 +0,0 @@
# Fix: MSI Authentication Error in Azure Pipeline
## 🔴 Error Message
```
Could not fetch access token for Managed Service Principal.
Please configure Managed Service Identity (MSI) for virtual machine
```
## 🎯 Quick Fix
De service connection gebruikt Managed Service Identity (MSI), wat niet werkt met Azure DevOps Services (cloud).
**Oplossing: Herconfigureer de service connection met Service Principal authenticatie.**
---
## ✅ Stap-voor-Stap Oplossing
### Stap 1: Verwijder Bestaande Service Connection
1. Ga naar **Azure DevOps**
2. **Project Settings** (onderaan links) → **Service connections**
3. Zoek: `zuyderland-cmdb-acr-connection`
4. Klik op **...** (three dots menu) → **Delete**
5. Bevestig verwijdering
### Stap 2: Maak Nieuwe Service Connection
1. **Project Settings** → **Service connections** → **New service connection**
2. Kies **"Docker Registry"**
3. Kies **"Azure Container Registry"**
4. **⚠️ BELANGRIJK**: Zorg dat **"Service Principal"** is geselecteerd als Authentication type
- **NIET** "Managed Identity" of "Workload Identity federation"
- **WEL** "Service Principal" (standaard optie)
5. Vul in:
- **Azure subscription**: Selecteer je subscription
- **Azure container registry**: Selecteer `zdlasacr` uit de dropdown
- **Service connection name**: `zuyderland-cmdb-acr-connection`
- **Description**: Optioneel
6. Klik **"Save"** (of **"Verify and save"**)
### Stap 3: Test Pipeline
1. Ga naar **Pipelines** → Je pipeline
2. Klik **"Run pipeline"**
3. De pipeline zou nu moeten werken!
---
## 🔧 Alternatief: Gebruik "Others" Optie
Als de Azure Container Registry optie nog steeds problemen geeft:
### Stap 1: Verwijder Bestaande Service Connection
(Zelfde als hierboven)
### Stap 2: Maak Service Connection met "Others"
1. **Project Settings** → **Service connections** → **New service connection**
2. Kies **"Docker Registry"**
3. Kies **"Others"** (in plaats van "Azure Container Registry")
4. Vul handmatig in:
- **Docker Registry**: `zdlasacr.azurecr.io`
- **Docker ID**: (ACR admin username)
- **Docker Password**: (ACR admin password)
- **Service connection name**: `zuyderland-cmdb-acr-connection`
### Stap 3: Haal ACR Admin Credentials Op
```bash
# Login bij Azure
az login
# Haal admin credentials op
az acr credential show --name zdlasacr
```
**Output:**
```json
{
"username": "zdlasacr",
"passwords": [
{
"name": "password",
      "value": "xxxxxxxxxxxxx"  ← Gebruik deze
}
]
}
```
**Gebruik:**
- **Docker ID**: `zdlasacr` (of de username uit output)
- **Docker Password**: `passwords[0].value` uit output
### Stap 4: Save en Test
1. Klik **"Save"**
2. Test de pipeline opnieuw
---
## 🔍 Waarom Gebeurt Dit?
**Managed Service Identity (MSI)** werkt alleen met:
- ✅ Azure DevOps Server (on-premises) met Managed Identity
- ❌ **NIET** met Azure DevOps Services (cloud) - Dit is jouw situatie!
**Service Principal** werkt met:
- ✅ Azure DevOps Services (cloud) - **Dit is wat je nodig hebt!**
- ✅ Azure DevOps Server (on-premises)
- ✅ Alle Azure services
---
## ✅ Verificatie
Na het herconfigureren, controleer:
1. **Service Connection Details:**
   - Ga naar **Service connections** → `zuyderland-cmdb-acr-connection`
- Check dat **Authentication type** = **"Service Principal"** (niet MSI)
2. **Test Pipeline:**
- Run de pipeline opnieuw
- De Docker build stap zou nu moeten werken
---
## 📚 Gerelateerde Documentatie
- **`AZURE-SERVICE-CONNECTION-TROUBLESHOOTING.md`** - Algemene troubleshooting
- **`AZURE-SERVICE-CONNECTION-AUTH.md`** - Authentication types uitleg
- **`AZURE-DEVOPS-SETUP.md`** - Service connection setup guide
---
## 🆘 Nog Steeds Problemen?
Als het nog steeds niet werkt:
1. **Check ACR bestaat:**
```bash
az acr show --name zdlasacr
```
2. **Check ACR admin is enabled:**
```bash
az acr show --name zdlasacr --query adminEnabled
```
Moet `true` zijn. Als `false`, enable het:
```bash
az acr update --name zdlasacr --admin-enabled true
```
3. **Check subscription toegang:**
- Ga naar Azure Portal → Subscription → Access control (IAM)
- Check of je account toegang heeft
4. **Gebruik "Others" optie** als laatste redmiddel (werkt altijd)
---
**💡 Tip**: Service Principal is de aanbevolen methode voor Azure DevOps Services. Het is veilig, betrouwbaar en wordt automatisch beheerd door Azure DevOps.

View File

@@ -1,152 +0,0 @@
# Azure Pipeline Quick Reference
Quick reference for configuring and using the automated deployment pipeline.
## 📋 Pipeline Variables
Update these in `azure-pipelines.yml`:
| Variable | Description | Example |
|----------|-------------|---------|
| `acrName` | Azure Container Registry name | `cmdbinsightacr` |
| `repositoryName` | Docker repository name | `cmdb-insight` |
| `dockerRegistryServiceConnection` | ACR service connection name | `zuyderland-cmdb-acr-connection` |
| `resourceGroup` | Azure resource group | `rg-cmdb-insight-prod` |
| `backendAppName` | Backend App Service name | `cmdb-backend-prod` |
| `frontendAppName` | Frontend App Service name | `cmdb-frontend-prod` |
| `azureSubscription` | Azure service connection for deployment | `zuyderland-cmdb-subscription` |
| `deployToProduction` | Enable/disable deployment | `true` or `false` |
| `useDeploymentSlots` | Use staging slots for zero-downtime | `true` or `false` |
## 🔧 Required Service Connections
### 1. Docker Registry Connection
**Purpose**: Push Docker images to ACR
**Setup**:
- Type: Docker Registry → Azure Container Registry
- Name: Match `dockerRegistryServiceConnection` variable
- Subscription: Your Azure subscription
- Registry: Your ACR
### 2. Azure Resource Manager Connection
**Purpose**: Deploy to App Services
**Setup**:
- Type: Azure Resource Manager
- Name: Match `azureSubscription` variable
- Subscription: Your Azure subscription
- Authentication: Managed Identity (recommended) or Service Principal
## 🚀 Pipeline Stages
### 1. Build Stage
- Builds backend Docker image
- Builds frontend Docker image
- Pushes both to ACR with tags: `$(Build.BuildId)` and `latest`
### 2. Deploy Stage
- Deploys backend to App Service
- Deploys frontend to App Service
- Restarts both services
- Verifies deployment
### 3. Verify Stage
- Health check on backend (`/api/health`)
- Accessibility check on frontend
- Reports status
## 🎯 Triggers
**Automatic triggers:**
- Push to `main` branch
- Git tags starting with `v*` (e.g., `v1.0.0`)
**Manual trigger:**
- Go to Pipelines → Your pipeline → Run pipeline
## 📝 Common Commands
### Check Pipeline Status
```bash
# View in Azure DevOps Portal
# Or use Azure CLI (if configured)
az pipelines runs list --organization <org> --project <project>
```
### View Pipeline Logs
- Go to Azure DevOps → Pipelines → Select run → View logs
### Cancel Running Pipeline
- Go to Azure DevOps → Pipelines → Select run → Cancel
## 🔄 Deployment Flow
```
Code Push → Build Images → Push to ACR → Deploy to App Services → Verify
```
**With Slots:**
```
Code Push → Build Images → Push to ACR → Deploy to Staging → Swap to Production → Verify
```
## ⚙️ Configuration Examples
### Basic Deployment (Current)
```yaml
deployToProduction: true
useDeploymentSlots: false
```
→ Direct deployment to production
### Zero-Downtime Deployment
```yaml
deployToProduction: true
useDeploymentSlots: true
```
→ Deploy to staging, then swap to production
### Build Only (No Deployment)
```yaml
deployToProduction: false
```
→ Only build and push images, don't deploy
## 🛠️ Troubleshooting
### Pipeline Fails: "Service connection not found"
- Check service connection name matches variable
- Verify connection exists in Project Settings
### Deployment Fails: "App Service not found"
- Verify app names match your Azure resources
- Check resource group name is correct
### Images Not Updating
- Check ACR has new images
- Verify App Service container settings
- Check Managed Identity has ACR pull permissions
## 📚 Related Files
- **`azure-pipelines.yml`** - Main pipeline (basic deployment)
- **`azure-pipelines-slots.yml`** - Advanced pipeline (with slots)
- **`docs/AZURE-PIPELINE-DEPLOYMENT.md`** - Complete setup guide
- **`docs/AZURE-NEW-SUBSCRIPTION-SETUP.md`** - Initial Azure setup
## ✅ Checklist
- [ ] Service connections created
- [ ] Pipeline variables configured
- [ ] Environment `production` created
- [ ] App Services exist in Azure
- [ ] Pipeline tested successfully
- [ ] Deployment verified
- [ ] Health checks passing
---
**Quick Start**: Update variables in `azure-pipelines.yml` and push to `main` branch!

View File

@@ -1,250 +0,0 @@
# Azure DevOps Pipeline - Repository Not Found
## 🔴 Probleem: "No matching repositories were found"
Als Azure DevOps je repository niet kan vinden bij het aanmaken van een pipeline, probeer deze oplossingen:
---
## ✅ Oplossing 1: Check Repository Naam
**Probleem:** De repository naam komt mogelijk niet overeen.
**Oplossing:**
1. **Ga naar Repos** (links in het menu)
2. **Check de exacte repository naam**
- Kijk naar de repository die je hebt gepusht
- Noteer de exacte naam (inclusief hoofdletters/spaties)
3. **In de pipeline wizard:**
- Zoek naar de repository met de exacte naam
- Of probeer verschillende variaties:
- `CMDB Insight`
- `cmdb-insight`
- `ZuyderlandCMDBGUI`
**Jouw repository naam zou moeten zijn:** `CMDB Insight` (met spaties)
---
## ✅ Oplossing 2: Check Repository Type
**In de pipeline wizard, probeer verschillende repository types:**
1. **Azure Repos Git** (als je code in Azure DevOps staat)
- Dit is waarschijnlijk wat je nodig hebt
- Check of je repository hier staat
2. **GitHub** (als je code in GitHub staat)
- Niet van toepassing voor jou
3. **Other Git** (als je code ergens anders staat)
- Niet van toepassing voor jou
**Voor jouw situatie:** Kies **"Azure Repos Git"**
---
## ✅ Oplossing 3: Check Repository Toegang
**Probleem:** Je hebt mogelijk geen toegang tot de repository.
**Oplossing:**
1. **Ga naar Repos** (links in het menu)
2. **Check of je de repository ziet**
- Als je de repository niet ziet, heb je mogelijk geen toegang
3. **Check permissions:**
- Project Settings → Repositories → Security
- Check of je account toegang heeft
---
## ✅ Oplossing 4: Check Project
**Probleem:** Je bent mogelijk in het verkeerde project.
**Oplossing:**
1. **Check het project naam** (bovenaan links)
- Moet zijn: **"cmdb"**
2. **Als je in een ander project bent:**
- Klik op het project dropdown (bovenaan links)
- Selecteer **"cmdb"**
3. **Probeer opnieuw** de pipeline aan te maken
---
## ✅ Oplossing 5: Refresh/Herlaad
**Soms helpt een simpele refresh:**
1. **Refresh de browser pagina** (F5 of Cmd+R)
2. **Sluit en open opnieuw** de pipeline wizard
3. **Probeer opnieuw**
---
## ✅ Oplossing 6: Check of Repository Bestaat
**Probleem:** De repository bestaat mogelijk niet in Azure DevOps.
**Oplossing:**
1. **Ga naar Repos** (links in het menu)
2. **Check of je repository zichtbaar is**
- Je zou moeten zien: `CMDB Insight` (of jouw repo naam)
3. **Als de repository niet bestaat:**
- Je moet eerst de code pushen naar Azure DevOps
- Of de repository aanmaken in Azure DevOps
**Check of je code al in Azure DevOps staat:**
- Ga naar Repos → Files
- Je zou je code moeten zien (bijv. `azure-pipelines.yml`, `backend/`, `frontend/`, etc.)
---
## ✅ Oplossing 7: Maak Repository Aan (Als Die Niet Bestaat)
**Als de repository nog niet bestaat in Azure DevOps:**
### Optie A: Push Code naar Bestaande Repository
**Als de repository al bestaat maar leeg is:**
1. **Check de repository URL:**
```
git@ssh.dev.azure.com:v3/ZuyderlandMedischCentrum/cmdb/cmdb-insight
```
2. **Push je code:**
```bash
cd /Users/berthausmans/Documents/Development/cmdb-insight
git push azure main
```
3. **Check in Azure DevOps:**
- Ga naar Repos → Files
- Je zou je code moeten zien
### Optie B: Maak Nieuwe Repository Aan
**Als de repository helemaal niet bestaat:**
1. **Ga naar Repos** (links in het menu)
2. **Klik op "New repository"** of het "+" icoon
3. **Vul in:**
- **Repository name**: `CMDB Insight`
- **Type**: Git
4. **Create**
5. **Push je code:**
```bash
cd /Users/berthausmans/Documents/Development/cmdb-insight
git remote add azure git@ssh.dev.azure.com:v3/ZuyderlandMedischCentrum/cmdb/cmdb-insight
git push azure main
```
---
## ✅ Oplossing 8: Gebruik "Other Git" Als Workaround
**Als niets werkt, gebruik "Other Git" als tijdelijke oplossing:**
1. **In de pipeline wizard:**
- Kies **"Other Git"** (in plaats van "Azure Repos Git")
2. **Vul in:**
- **Repository URL**: `git@ssh.dev.azure.com:v3/ZuyderlandMedischCentrum/cmdb/cmdb-insight`
- Of HTTPS: `https://ZuyderlandMedischCentrum@dev.azure.com/ZuyderlandMedischCentrum/cmdb/_git/cmdb-insight`
3. **Branch**: `main`
4. **Continue**
**⚠️ Let op:** Dit werkt, maar "Azure Repos Git" is de voorkeursoptie.
---
## 🔍 Diagnose Stappen
**Om te diagnosticeren wat het probleem is:**
### 1. Check of Repository Bestaat
1. Ga naar **Repos** (links in het menu)
2. Check of je `CMDB Insight` ziet
3. Klik erop en check of je code ziet
### 2. Check Repository URL
**In Terminal:**
```bash
cd /Users/berthausmans/Documents/Development/cmdb-insight
git remote -v
```
**Je zou moeten zien:**
```
azure git@ssh.dev.azure.com:v3/ZuyderlandMedischCentrum/cmdb/cmdb-insight (fetch)
azure git@ssh.dev.azure.com:v3/ZuyderlandMedischCentrum/cmdb/cmdb-insight (push)
```
### 3. Check of Code Gepusht is
**In Terminal:**
```bash
git log azure/main --oneline -5
```
**Als je commits ziet:** ✅ Code is gepusht
**Als je een fout krijgt:** ❌ Code is niet gepusht
### 4. Push Code (Als Niet Gepusht)
```bash
git push azure main
```
---
## 💡 Aanbevolen Aanpak
**Probeer in deze volgorde:**
1. ✅ **Check Repos** - Ga naar Repos en check of je repository bestaat
2. ✅ **Check project naam** - Zorg dat je in "cmdb" project bent
3. ✅ **Refresh pagina** - Soms helpt een simpele refresh
4. ✅ **Push code** - Als repository leeg is, push je code
5. ✅ **Gebruik "Other Git"** - Als workaround
---
## 🎯 Quick Fix (Meest Waarschijnlijk)
**Het probleem is waarschijnlijk dat de repository leeg is of niet bestaat:**
1. **Check in Azure DevOps:**
- Ga naar **Repos** → **Files**
- Check of je code ziet (bijv. `azure-pipelines.yml`)
2. **Als repository leeg is:**
```bash
cd /Users/berthausmans/Documents/Development/cmdb-insight
git push azure main
```
3. **Probeer opnieuw** de pipeline aan te maken
---
## 📚 Meer Informatie
- [Azure DevOps Repositories](https://learn.microsoft.com/en-us/azure/devops/repos/)
- [Create Pipeline from Repository](https://learn.microsoft.com/en-us/azure/devops/pipelines/create-first-pipeline)
---
## 🆘 Nog Steeds Problemen?
Als niets werkt:
1. **Check of je in het juiste project bent** (cmdb)
2. **Check of de repository bestaat** (Repos → Files)
3. **Push je code** naar Azure DevOps
4. **Gebruik "Other Git"** als workaround
**De "Other Git" optie werkt altijd**, ook als de repository niet wordt gevonden in de dropdown.

View File

@@ -1,283 +0,0 @@
# Azure Container Registry - Moet ik dit aanvragen?
## 🤔 Korte Antwoord
**Het hangt af van je deployment strategie:**
1. **Azure App Service (zonder containers)** → ❌ **Geen ACR nodig**
- Direct deployment van code
- Eenvoudiger en goedkoper
- **Aanbevolen voor jouw situatie** (20 gebruikers)
2. **Container-based deployment** → ✅ **ACR nodig** (of alternatief)
- Azure Container Instances (ACI)
- Azure Kubernetes Service (AKS)
- VM met Docker Compose
---
## 📊 Deployment Opties Vergelijking
### Optie 1: Azure App Service (Zonder Containers) ⭐ **AANBEVOLEN**
**Wat je nodig hebt:**
- ✅ Azure App Service Plan (B1) - €15-25/maand
- ✅ Azure Key Vault - €1-2/maand
- ✅ Database (PostgreSQL of SQLite) - €1-30/maand
- ❌ **Geen Container Registry nodig!**
**Hoe het werkt:**
- Azure DevOps bouwt je code direct
- Deployt naar App Service via ZIP deploy of Git
- Geen Docker images nodig
**Voordelen:**
- ✅ Eenvoudiger setup
- ✅ Goedkoper (geen ACR kosten)
- ✅ Snellere deployments
- ✅ Managed service (minder onderhoud)
**Nadelen:**
- ❌ Minder flexibel dan containers
- ❌ Platform-specifiek (Azure only)
**Voor jouw situatie:** ✅ **Dit is de beste optie!**
---
### Optie 2: Container Registry (Als je containers wilt gebruiken)
**Je hebt 3 keuzes voor een registry:**
#### A) Azure Container Registry (ACR) 💰
**Kosten:**
- Basic: ~€5/maand
- Standard: ~€20/maand
- Premium: ~€50/maand
**Voordelen:**
- ✅ Integratie met Azure services
- ✅ Security scanning (Premium)
- ✅ Geo-replicatie (Standard/Premium)
- ✅ RBAC integratie
**Nadelen:**
- ❌ Extra kosten
- ❌ Moet aangevraagd worden bij IT
**Wanneer aanvragen:**
- Als je containers gebruikt in productie
- Als je Azure-native deployment wilt
- Als je security scanning nodig hebt
---
#### B) Gitea Container Registry (Gratis) 🆓
**Kosten:** Gratis (als je al Gitea hebt)
**Voordelen:**
- ✅ Geen extra kosten
- ✅ Al beschikbaar (als Gitea dit ondersteunt)
- ✅ Eenvoudig te gebruiken
**Nadelen:**
- ❌ Minder features dan ACR
- ❌ Geen security scanning
- ❌ Geen geo-replicatie
**Je hebt al:**
- ✅ Script: `scripts/build-and-push.sh`
- ✅ Config: `docker-compose.prod.registry.yml`
- ✅ Documentatie: `docs/GITEA-DOCKER-REGISTRY.md`
**Wanneer gebruiken:**
- ✅ Development/test omgevingen
- ✅ Als je al Gitea hebt met registry enabled
- ✅ Kleine projecten zonder enterprise requirements
---
#### C) Docker Hub (Gratis/Paid)
**Kosten:**
- Free: 1 private repo, unlimited public
- Pro: $5/maand voor unlimited private repos
**Voordelen:**
- ✅ Eenvoudig te gebruiken
- ✅ Gratis voor public images
- ✅ Wereldwijd beschikbaar
**Nadelen:**
- ❌ Rate limits op free tier
- ❌ Minder integratie met Azure
- ❌ Security concerns (voor private data)
**Wanneer gebruiken:**
- Development/test
- Public images
- Als je geen Azure-native oplossing nodig hebt
---
## 🎯 Aanbeveling voor Jouw Situatie
### Scenario 1: Eenvoudige Productie Deployment (Aanbevolen) ⭐
**Gebruik: Azure App Service zonder containers**
**Waarom:**
- ✅ Geen ACR nodig
- ✅ Eenvoudiger en goedkoper
- ✅ Voldoende voor 20 gebruikers
- ✅ Minder complexiteit
**Stappen:**
1. **Niet nodig:** ACR aanvragen
2. **Wel nodig:** Azure App Service Plan aanvragen
3. **Pipeline aanpassen:** Gebruik Azure App Service deployment task in plaats van Docker
**Pipeline aanpassing:**
```yaml
# In plaats van Docker build/push:
- task: AzureWebApp@1
inputs:
azureSubscription: 'your-subscription'
appName: 'cmdb-backend'
package: '$(System.DefaultWorkingDirectory)'
```
---
### Scenario 2: Container-based Deployment
**Als je toch containers wilt gebruiken:**
**Optie A: Gebruik Gitea Registry (als beschikbaar)**
- ✅ Geen aanvraag nodig
- ✅ Gratis
- ✅ Al geconfigureerd in je project
**Optie B: Vraag ACR aan bij IT**
- 📧 Stuur een request naar IT/Infrastructure team
- 📋 Vermeld: "Azure Container Registry - Basic tier voor CMDB GUI project"
- 💰 Budget: ~€5-20/maand (afhankelijk van tier)
**Request Template:**
```
Onderwerp: Azure Container Registry aanvraag - CMDB GUI Project
Beste IT Team,
Voor het CMDB Insight project hebben we een Azure Container Registry nodig
voor het hosten van Docker images.
Details:
- Project: CMDB Insight
- Registry naam: zuyderlandcmdbacr (of zoals jullie naming convention)
- SKU: Basic (voor development/productie)
- Resource Group: rg-cmdb-gui
- Location: West Europe
- Doel: Hosten van backend en frontend Docker images voor productie deployment
Geschatte kosten: €5-20/maand (Basic tier)
Alvast bedankt!
```
---
## 📋 Beslissingsmatrix
| Situatie | Registry Nodig? | Welke? | Kosten |
|----------|----------------|--------|--------|
| **Azure App Service (code deploy)** | ❌ Nee | - | €0 |
| **Gitea Registry beschikbaar** | ✅ Ja | Gitea | €0 |
| **Containers + Azure native** | ✅ Ja | ACR Basic | €5/maand |
| **Containers + Security scanning** | ✅ Ja | ACR Standard | €20/maand |
| **Development/test only** | ✅ Ja | Docker Hub (free) | €0 |
---
## 🚀 Quick Start: Wat Moet Je Nu Doen?
### Als je Azure App Service gebruikt (Aanbevolen):
1. ❌ **Geen ACR nodig** - Skip deze stap
2. ✅ Vraag **Azure App Service Plan** aan bij IT
3. ✅ Configureer pipeline voor App Service deployment
4. ✅ Gebruik bestaande `azure-pipelines.yml` maar pas aan voor App Service
### Als je containers gebruikt:
1. ✅ **Check eerst:** Is Gitea Container Registry beschikbaar?
- Zo ja → Gebruik Gitea (gratis, al geconfigureerd)
- Zo nee → Vraag ACR Basic aan bij IT
2. ✅ Als je ACR aanvraagt:
- Stuur request naar IT team
- Gebruik request template hierboven
- Wacht op goedkeuring
3. ✅ Configureer pipeline:
- Pas `azure-pipelines.yml` aan met ACR naam
- Maak service connection in Azure DevOps
- Test pipeline
---
## 💡 Mijn Aanbeveling
**Voor jouw situatie (20 gebruikers, interne tool):**
1. **Start met Azure App Service** (zonder containers)
- Eenvoudiger
- Goedkoper
- Voldoende functionaliteit
2. **Als je later containers nodig hebt:**
- Gebruik eerst Gitea Registry (als beschikbaar)
- Vraag ACR aan als Gitea niet voldoet
3. **Vraag ACR alleen aan als:**
- Je security scanning nodig hebt
- Je geo-replicatie nodig hebt
- Je Azure-native container deployment wilt
---
## ❓ Vragen voor IT Team
Als je ACR wilt aanvragen, vraag dan:
1. **Hebben we al een ACR?** (misschien kunnen we die delen)
2. **Wat is de naming convention?** (voor registry naam)
3. **Welke SKU is aanbevolen?** (Basic/Standard/Premium)
4. **Welke resource group gebruiken we?** (best practices)
5. **Zijn er compliance requirements?** (security scanning, etc.)
6. **Heeft Gitea Container Registry?** (gratis alternatief)
---
## 📚 Meer Informatie
- **Azure App Service Deployment**: `docs/AZURE-DEPLOYMENT-SUMMARY.md`
- **Gitea Registry**: `docs/GITEA-DOCKER-REGISTRY.md`
- **Azure Container Registry**: `docs/AZURE-CONTAINER-REGISTRY.md`
- **Azure DevOps Setup**: `docs/AZURE-DEVOPS-SETUP.md`
---
## 🎯 Conclusie
**Kort antwoord:**
- **Azure App Service?** → ❌ Geen ACR nodig
- **Containers?** → ✅ ACR nodig (of Gitea/Docker Hub)
- **Aanbeveling:** Start met App Service, vraag ACR later aan als nodig
**Actie:**
1. Beslis: App Service of Containers?
2. Als Containers: Check Gitea Registry eerst
3. Als ACR nodig: Vraag aan bij IT met request template

View File

@@ -233,7 +233,7 @@ echo "Backend: https://${BACKEND_APP}.azurewebsites.net/api"
- **`AZURE-NEW-SUBSCRIPTION-SETUP.md`** - Complete step-by-step setup guide
- **`AZURE-APP-SERVICE-DEPLOYMENT.md`** - Detailed App Service deployment
- **`AZURE-CONTAINER-REGISTRY.md`** - ACR setup and usage
- **`AZURE-ACR-SETUP.md`** - ACR setup and usage
- **`AZURE-QUICK-REFERENCE.md`** - Quick reference guide
- **`PRODUCTION-DEPLOYMENT.md`** - General production deployment

View File

@@ -214,9 +214,9 @@ Check:
## 📚 Gerelateerde Documentatie
- **`AZURE-PIPELINE-MSI-ERROR-FIX.md`** - MSI error fix
- **`AZURE-PIPELINES.md`** - Pipeline troubleshooting (includes MSI error fix)
- **`AZURE-SERVICE-CONNECTION-TROUBLESHOOTING.md`** - Algemene troubleshooting
- **`AZURE-ACR-PERMISSIONS.md`** - ACR permissions uitleg
- **`AZURE-ACR-SETUP.md`** - ACR setup (includes permissions)
---

View File

@@ -242,20 +242,39 @@ git push origin v1.0.0
**Aanbeveling: Basic SKU** ⭐ (~€5/maand)
**Basic SKU** (Aanbevolen voor jouw situatie):
- ✅ 10GB storage - Ruim voldoende voor backend + frontend met meerdere versies
- ✅ 1GB/day webhook throughput - Voldoende voor CI/CD
- ✅ Unlimited pulls - Geen extra kosten
- ✅ RBAC support - Role-based access control
### Basic SKU (Aanbevolen voor jouw situatie)
**Inclusief:**
- ✅ **10GB storage** - Ruim voldoende voor backend + frontend images met meerdere versies
- ✅ **1GB/day webhook throughput** - Voldoende voor CI/CD
- ✅ **Unlimited pulls** - Geen extra kosten voor image pulls
- ✅ **Admin user enabled** - Voor development/productie
- ✅ **RBAC support** - Role-based access control
- ✅ **Content trust** - Image signing support
- ✅ **Kosten: ~€5/maand**
**Standard SKU** (~€20/maand):
**Limitaties:**
- ❌ Geen geo-replicatie
- ❌ Geen security scanning (vulnerability scanning)
- ❌ Geen content trust storage
**Wanneer gebruiken:**
- ✅ **Jouw situatie** - 20 gebruikers, corporate tool
- ✅ Development en productie omgevingen
- ✅ Kleine tot middelgrote teams
- ✅ Budget-conscious deployments
### Standard SKU (~€20/maand)
**Inclusief (alles van Basic +):**
- 100GB storage
- 10GB/day webhook throughput
- Geo-replicatie
- **Niet nodig voor jouw situatie**
**Premium SKU** (~€50/maand):
### Premium SKU (~€50/maand)
**Inclusief (alles van Standard +):**
- 500GB storage
- Security scanning
- Private endpoints
@@ -263,13 +282,85 @@ git push origin v1.0.0
**Voor jouw situatie (20 gebruikers): Basic is perfect!** ✅
📚 Zie `docs/AZURE-ACR-PRICING.md` voor volledige vergelijking.
---
## 🔐 Permissions Mode
**Aanbeveling: RBAC Registry Permissions** ⭐
### RBAC Registry Permissions (Aanbevolen)
**Hoe het werkt:**
- Permissions worden ingesteld op **registry niveau**
- Alle repositories binnen de registry delen dezelfde permissions
- Gebruikers hebben toegang tot alle repositories of geen
**Voordelen:**
- ✅ **Eenvoudig** - Minder complexiteit
- ✅ **Makkelijk te beheren** - Eén set permissions voor de hele registry
- ✅ **Voldoende voor de meeste scenario's** - Perfect voor jouw situatie
- ✅ **Standaard keuze** - Meest gebruikte optie
**Wanneer gebruiken:**
- ✅ **Jouw situatie** - 20 gebruikers, corporate tool
- ✅ Kleine tot middelgrote teams
- ✅ Alle repositories hebben dezelfde toegangsvereisten
- ✅ Eenvoudige permission structuur gewenst
### RBAC Registry + ABAC Repository Permissions
**Wanneer gebruiken:**
- Als je per-repository permissions nodig hebt
- Grote teams met verschillende toegangsvereisten
- Complexe permission structuur
**Voor jouw situatie: RBAC Registry Permissions is perfect!** ✅
---
## 🔄 Shared ACR Setup (Optioneel)
Als je al een ACR hebt voor andere applicaties, kun je deze hergebruiken:
**Voordelen:**
- ✅ **Cost Savings**: Eén ACR voor alle applicaties (€5-20/month vs multiple ACRs)
- ✅ **Centralized Management**: All images in one place
- ✅ **Easier Collaboration**: Teams can share images
**Hoe het werkt:**
- ACR is shared, maar elke applicatie gebruikt een **unique repository name**
- Repository name (`cmdb-insight`) scheidt jouw app van anderen
- Images zijn georganiseerd per applicatie: `acr.azurecr.io/app-name/service:tag`
**Voorbeeld structuur:**
```
zuyderlandacr.azurecr.io/
├── cmdb-insight/ ← Deze applicatie
│ ├── backend:latest
│ └── frontend:latest
├── other-app/ ← Andere applicatie
│ └── api:latest
└── shared-services/ ← Gedeelde base images
└── nginx:latest
```
**Setup:**
```bash
# Gebruik bestaande ACR
ACR_NAME="your-existing-acr"
ACR_RESOURCE_GROUP="rg-shared-services"
# Verifieer dat het bestaat
az acr show --name $ACR_NAME --resource-group $ACR_RESOURCE_GROUP
# Update pipeline variabelen met bestaande ACR naam
```
---
## 📚 Meer Informatie
- **Volledige ACR Guide**: `docs/AZURE-CONTAINER-REGISTRY.md`
- **Deployment Guide**: `docs/AZURE-APP-SERVICE-DEPLOYMENT.md`
- **Azure DevOps Setup**: `docs/AZURE-DEVOPS-SETUP.md`
- **Deployment Guide**: `docs/PRODUCTION-DEPLOYMENT.md`
@@ -284,4 +375,4 @@ Nu je images in ACR staan, kun je ze deployen naar:
3. **Azure Kubernetes Service (AKS)** - Voor complexere setups
4. **VM met Docker Compose** - Volledige controle
Zie `docs/AZURE-DEPLOYMENT-SUMMARY.md` voor deployment opties.
Zie `docs/AZURE-APP-SERVICE-DEPLOYMENT.md` voor deployment opties.

View File

@@ -2,6 +2,39 @@
Complete deployment guide voor CMDB Insight naar Azure App Service.
## 🎯 Waarom Azure App Service?
Azure App Service is de aanbevolen deployment optie voor CMDB Insight omdat:
1. **Managed Service**
- Geen serverbeheer, SSH, Linux configuratie nodig
- Azure beheert alles (updates, security patches, scaling)
- Perfect voor teams die geen infrastructuur willen beheren
2. **Eenvoudig & Snel**
- Setup in ~15 minuten
- Automatische SSL/TLS certificaten
- Integratie met Azure DevOps pipelines
3. **Kosten-Effectief**
- Basic B1 plan: ~€15-25/maand
- Voldoende voor 20 gebruikers
- Geen verborgen kosten
4. **Flexibel**
- Deployment slots voor testen (staging → productie)
- Eenvoudige rollback
- Integratie met Azure Key Vault voor secrets
5. **Monitoring & Compliance**
- Integratie met Azure Monitor
- Logging en audit trails (NEN 7510 compliance)
- Health checks ingebouwd
**Geschatte kosten:** ~€20-25/maand (met PostgreSQL database)
---
## 📋 Prerequisites
- Azure CLI geïnstalleerd en geconfigureerd (`az login`)
@@ -84,16 +117,83 @@ az webapp config container set \
--docker-registry-server-url https://zdlasacr.azurecr.io
```
### Stap 5: Environment Variabelen
### Stap 5: PostgreSQL Database Setup (Aanbevolen voor Productie)
**Voor PostgreSQL setup, zie:** `docs/AZURE-POSTGRESQL-SETUP.md`
**Quick setup met script:**
```bash
./scripts/setup-postgresql.sh
```
**Of handmatig:**
```bash
# Maak PostgreSQL Flexible Server aan
az postgres flexible-server create \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--location westeurope \
--admin-user cmdbadmin \
--admin-password $(openssl rand -base64 32) \
--sku-name Standard_B1ms \
--tier Burstable \
--storage-size 32 \
--version 15
# Maak database aan (één database is voldoende)
az postgres flexible-server db create \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--server-name zdl-cmdb-insight-prd-psql \
--database-name cmdb_insight
```
**Voor SQLite (alternatief, eenvoudiger maar minder geschikt voor productie):**
- Geen extra setup nodig
- Database wordt automatisch aangemaakt in container
- Zie Stap 5b hieronder
### Stap 5a: Environment Variabelen met PostgreSQL
```bash
# Backend (vervang met jouw waarden)
# Backend met PostgreSQL (vervang met jouw waarden)
az webapp config appsettings set \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--settings \
NODE_ENV=production \
PORT=3001 \
DATABASE_TYPE=postgres \
DATABASE_HOST=zdl-cmdb-insight-prd-psql.postgres.database.azure.com \
DATABASE_PORT=5432 \
DATABASE_NAME=cmdb_insight \
DATABASE_USER=cmdbadmin \
DATABASE_PASSWORD=your-database-password \
DATABASE_SSL=true \
JIRA_BASE_URL=https://jira.zuyderland.nl \
JIRA_SCHEMA_ID=your-schema-id \
JIRA_PAT=your-pat-token \
SESSION_SECRET=$(openssl rand -hex 32) \
FRONTEND_URL=https://zdl-cmdb-insight-prd-frontend-webapp.azurewebsites.net
# Frontend
az webapp config appsettings set \
--name zdl-cmdb-insight-prd-frontend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--settings \
VITE_API_URL=https://zdl-cmdb-insight-prd-backend-webapp.azurewebsites.net/api
```
### Stap 5b: Environment Variabelen met SQLite (Alternatief)
```bash
# Backend met SQLite (vervang met jouw waarden)
az webapp config appsettings set \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--settings \
NODE_ENV=production \
PORT=3001 \
DATABASE_TYPE=sqlite \
JIRA_BASE_URL=https://jira.zuyderland.nl \
JIRA_SCHEMA_ID=your-schema-id \
JIRA_PAT=your-pat-token \
@@ -285,8 +385,7 @@ az webapp restart --name zdl-cmdb-insight-prd-backend-webapp --resource-group zd
## 📚 Meer Informatie
- **Deployment Advies**: `docs/DEPLOYMENT-ADVICE.md`
- **Quick Deployment Guide**: `docs/QUICK-DEPLOYMENT-GUIDE.md`
- **Quick Reference**: `docs/AZURE-QUICK-REFERENCE.md`
- **Production Deployment**: `docs/PRODUCTION-DEPLOYMENT.md`
---

View File

@@ -2,6 +2,36 @@
Guide for using the separate build and deployment pipelines.
## 📋 Quick Reference
### Pipeline Variables
| Variable | Description | Example |
|----------|-------------|---------|
| `acrName` | Azure Container Registry name | `zdlas` |
| `repositoryName` | Docker repository name | `cmdb-insight` |
| `dockerRegistryServiceConnection` | ACR service connection name | `zuyderland-cmdb-acr-connection` |
| `resourceGroup` | Azure resource group | `rg-cmdb-insight-prod` |
| `backendAppName` | Backend App Service name | `cmdb-backend-prod` |
| `frontendAppName` | Frontend App Service name | `cmdb-frontend-prod` |
| `azureSubscription` | Azure service connection for deployment | `zuyderland-cmdb-subscription` |
| `deployToProduction` | Enable/disable deployment | `true` or `false` |
| `useDeploymentSlots` | Use staging slots for zero-downtime | `true` or `false` |
### Required Service Connections
1. **Docker Registry Connection** (for ACR)
- Type: Docker Registry → Azure Container Registry
- Name: Match `dockerRegistryServiceConnection` variable
- Authentication: **Service Principal** (not Managed Identity)
2. **Azure Resource Manager Connection** (for App Service deployment)
- Type: Azure Resource Manager
- Name: Match `azureSubscription` variable
- Authentication: Managed Identity or Service Principal
---
## 📋 Pipeline Files
### 1. `azure-pipelines.yml` - Build and Push Images
@@ -185,20 +215,43 @@ See `azure-pipelines-slots.yml` for an advanced example with deployment slots.
**Issue**: "Service connection not found"
- **Solution**: Verify service connection name matches `dockerRegistryServiceConnection` variable
- Check: **Project Settings** → **Service connections** → Verify name matches
**Issue**: "ACR not found"
- **Solution**: Check `acrName` variable matches your ACR name
- Verify: `az acr list --query "[].name"`
**Issue**: "MSI Authentication Error" / "Could not fetch access token for Managed Service Principal"
- **Solution**: Service connection must use **Service Principal** authentication (not Managed Identity)
- Recreate service connection: **Docker Registry** → **Azure Container Registry** → Use **Service Principal**
- See `docs/AZURE-SERVICE-CONNECTION-TROUBLESHOOTING.md` for details
**Issue**: "Permission denied"
- **Solution**: Verify service connection has correct permissions
- Check ACR admin is enabled: `az acr update --name <acr-name> --admin-enabled true`
### Deployment Pipeline Fails
**Issue**: "App Service not found"
- **Solution**: Verify app names match your Azure resources
- Check: `az webapp list --query "[].name"`
**Issue**: "Environment not found"
- **Solution**: Create `production` environment in Azure DevOps
- **Pipelines** → **Environments** → **Create environment**
**Issue**: "Image not found in ACR"
- **Solution**: Run build pipeline first to push images to ACR
- Verify: `az acr repository show-tags --name <acr-name> --repository cmdb-insight/backend`
### Repository Not Found
**Issue**: "No matching repositories were found" when creating pipeline
- **Solution 1**: Check repository exists in Azure DevOps → **Repos** → **Files**
- **Solution 2**: Push code to Azure DevOps: `git push azure main`
- **Solution 3**: Use **"Other Git"** option in pipeline wizard with repository URL
- **Solution 4**: Verify you're in the correct project (check project name in dropdown)
- See `docs/AZURE-SERVICE-CONNECTION-TROUBLESHOOTING.md` for details
## ✅ Checklist

View File

@@ -0,0 +1,371 @@
# Azure PostgreSQL Setup for Production
Complete guide for setting up Azure Database for PostgreSQL Flexible Server for CMDB Insight production deployment.
## 🎯 Overview
**Why PostgreSQL for Production?**
- ✅ Better concurrency handling (multiple users)
- ✅ Connection pooling support
- ✅ Better performance for 20+ users
- ✅ Production-ready database solution
- ✅ Identical dev/prod stack
**Cost:** ~€20-30/month (Basic B1ms tier)
---
## 📋 Prerequisites
- Azure CLI installed and configured (`az login`)
- Resource group created: `zdl-cmdb-insight-prd-euwe-rg`
- Appropriate permissions to create Azure Database resources
---
## 🚀 Quick Setup (15 minutes)
### Step 1: Create PostgreSQL Flexible Server
```bash
# Set variables
RESOURCE_GROUP="zdl-cmdb-insight-prd-euwe-rg"
SERVER_NAME="zdl-cmdb-insight-prd-psql"
ADMIN_USER="cmdbadmin"
ADMIN_PASSWORD="$(openssl rand -base64 32)" # Generate secure password
LOCATION="westeurope"
# Create PostgreSQL Flexible Server
az postgres flexible-server create \
--resource-group $RESOURCE_GROUP \
--name $SERVER_NAME \
--location $LOCATION \
--admin-user $ADMIN_USER \
--admin-password $ADMIN_PASSWORD \
--sku-name Standard_B1ms \
--tier Burstable \
--storage-size 32 \
--version 15 \
--public-access 0.0.0.0 \
--high-availability Disabled
echo "PostgreSQL server created!"
echo "Server: $SERVER_NAME.postgres.database.azure.com"
echo "Admin User: $ADMIN_USER"
echo "Password: $ADMIN_PASSWORD"
echo ""
echo "⚠️ Save the password securely!"
```
### Step 2: Create Database
**Note:** The application uses a single database for all data. All tables (CMDB cache, classification history, and session state) are stored in the same database.
```bash
# Create main database (this is all you need)
az postgres flexible-server db create \
--resource-group $RESOURCE_GROUP \
--server-name $SERVER_NAME \
--database-name cmdb_insight
echo "✅ Database created"
```
### Step 3: Configure Firewall Rules
Allow Azure App Service to connect:
```bash
# Get App Service outbound IPs
BACKEND_IPS=$(az webapp show \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group $RESOURCE_GROUP \
--query "outboundIpAddresses" -o tsv)
# Add firewall rule for App Service
# WARNING: the range below (0.0.0.0 - 255.255.255.255) allows connections from
# ALL public IPs, not just the App Service. For production, create one rule per
# outbound IP in $BACKEND_IPS instead, or use the Azure-services-only rule below.
az postgres flexible-server firewall-rule create \
--resource-group $RESOURCE_GROUP \
--name $SERVER_NAME \
--rule-name AllowAppService \
--start-ip-address 0.0.0.0 \
--end-ip-address 255.255.255.255
# Or more secure: Allow Azure Services only
az postgres flexible-server firewall-rule create \
--resource-group $RESOURCE_GROUP \
--name $SERVER_NAME \
--rule-name AllowAzureServices \
--start-ip-address 0.0.0.0 \
--end-ip-address 0.0.0.0
```
**Note:** `0.0.0.0` to `0.0.0.0` allows all Azure services. For production, consider using specific App Service outbound IPs.
### Step 4: Store Credentials in Key Vault
```bash
KEY_VAULT="zdl-cmdb-insight-prd-kv"
# Store database password
az keyvault secret set \
--vault-name $KEY_VAULT \
--name DatabasePassword \
--value $ADMIN_PASSWORD
# Store connection string (optional, can construct from components)
# Note: if the password contains URL-reserved characters (e.g. '/' or '+' from
# base64 output), percent-encode them in the connection string below.
CONNECTION_STRING="postgresql://${ADMIN_USER}:${ADMIN_PASSWORD}@${SERVER_NAME}.postgres.database.azure.com:5432/cmdb_insight?sslmode=require"
az keyvault secret set \
--vault-name $KEY_VAULT \
--name DatabaseUrl \
--value $CONNECTION_STRING
echo "✅ Credentials stored in Key Vault"
```
### Step 5: Configure App Service App Settings
```bash
# Get Key Vault URL
KV_URL=$(az keyvault show --name $KEY_VAULT --query properties.vaultUri -o tsv)
# Configure backend app settings
az webapp config appsettings set \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group $RESOURCE_GROUP \
--settings \
DATABASE_TYPE=postgres \
DATABASE_HOST="${SERVER_NAME}.postgres.database.azure.com" \
DATABASE_PORT=5432 \
DATABASE_NAME=cmdb_insight \
DATABASE_USER=$ADMIN_USER \
DATABASE_PASSWORD="@Microsoft.KeyVault(SecretUri=${KV_URL}secrets/DatabasePassword/)" \
DATABASE_SSL=true
echo "✅ App settings configured"
```
**Alternative: Use DATABASE_URL directly**
```bash
az webapp config appsettings set \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group $RESOURCE_GROUP \
--settings \
DATABASE_TYPE=postgres \
DATABASE_URL="@Microsoft.KeyVault(SecretUri=${KV_URL}secrets/DatabaseUrl/)"
```
---
## 🔐 Security Best Practices
### 1. Use Key Vault for Secrets
**Do:** Store database password in Key Vault
**Don't:** Store password in app settings directly
### 2. Enable SSL/TLS
**Do:** Always use `DATABASE_SSL=true` or `?sslmode=require` in connection string
**Don't:** Connect without SSL in production
### 3. Firewall Rules
**Do:** Restrict to specific IPs or Azure services
**Don't:** Allow `0.0.0.0/0` (all IPs) unless necessary
### 4. Use Managed Identity (Advanced)
For even better security, use Managed Identity instead of passwords:
```bash
# Enable Managed Identity on PostgreSQL server
az postgres flexible-server identity assign \
--resource-group $RESOURCE_GROUP \
--name $SERVER_NAME \
--identity /subscriptions/.../resourceGroups/.../providers/Microsoft.ManagedIdentity/userAssignedIdentities/...
# Grant access
az postgres flexible-server ad-admin create \
--resource-group $RESOURCE_GROUP \
--server-name $SERVER_NAME \
--display-name "App Service Identity" \
--object-id <principal-id>
```
---
## 📊 Database Configuration
### Connection Pooling
The application uses connection pooling automatically via the `pg` library:
- **Max connections:** 20 (configured in `PostgresAdapter`)
- **Idle timeout:** 30 seconds
- **Connection timeout:** 10 seconds
### Database Sizes
For 20 users:
- **Database (cmdb_insight):** ~25-60MB total (includes CMDB cache, classification history, and session state)
- **Total storage:** 32GB (plenty of room for growth)
**Note:** All data (CMDB objects, classification history, and session state) is stored in a single database.
---
## 🔄 Migration from SQLite
If you're migrating from SQLite to PostgreSQL:
```bash
# 1. Export data from SQLite (if needed)
# The application will automatically sync from Jira, so migration may not be necessary
# 2. Set DATABASE_TYPE=postgres in app settings
# 3. Restart the app - it will create tables automatically on first run
# 4. The app will sync data from Jira Assets on first sync
```
**Note:** Since the database is a cache layer that syncs from Jira, you typically don't need to migrate data - just let it sync fresh.
---
## 🧪 Testing Connection
### Test from Local Machine
```bash
# Install psql if needed
# macOS: brew install postgresql
# Ubuntu: sudo apt-get install postgresql-client
# Connect (replace with your values)
psql "host=${SERVER_NAME}.postgres.database.azure.com port=5432 dbname=cmdb_insight user=${ADMIN_USER} password=${ADMIN_PASSWORD} sslmode=require"
```
### Test from App Service
```bash
# Check app logs
az webapp log tail \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group $RESOURCE_GROUP
# Look for: "Creating PostgreSQL adapter" or connection errors
```
---
## 📈 Monitoring
### Check Database Status
```bash
az postgres flexible-server show \
--resource-group $RESOURCE_GROUP \
--name $SERVER_NAME \
--query "{state:state, version:version, sku:sku}"
```
### View Database Size
```sql
-- Connect to database
SELECT
pg_database.datname,
pg_size_pretty(pg_database_size(pg_database.datname)) AS size
FROM pg_database
WHERE datname = 'cmdb_insight';
```
### Monitor Connections
```sql
SELECT
count(*) as total_connections,
state,
application_name
FROM pg_stat_activity
WHERE datname = 'cmdb_insight'
GROUP BY state, application_name;
```
---
## 💰 Cost Optimization
### Current Setup (Recommended)
- **Tier:** Burstable (B1ms)
- **vCores:** 1
- **RAM:** 2GB
- **Storage:** 32GB
- **Cost:** ~€20-30/month
### If You Need More Performance
- **Upgrade to:** Standard_B2s (2 vCores, 4GB RAM) - ~€40-50/month
- **Or:** Standard_B1ms with more storage if needed
### Cost Savings Tips
1. **Use Burstable tier** - Perfect for 20 users
2. **Start with 32GB storage** - Can scale up later
3. **Disable high availability** - Not needed for small teams
4. **Use same region** - Reduces latency and costs
---
## 🛠️ Troubleshooting
### Connection Refused
**Problem:** Can't connect to database
**Solutions:**
1. Check firewall rules: `az postgres flexible-server firewall-rule list --resource-group $RESOURCE_GROUP --name $SERVER_NAME`
2. Verify SSL is enabled: `DATABASE_SSL=true`
3. Check credentials in Key Vault
### Authentication Failed
**Problem:** Wrong username/password
**Solutions:**
1. Verify admin user: `az postgres flexible-server show --resource-group $RESOURCE_GROUP --name $SERVER_NAME --query administratorLogin`
2. Reset password if needed: `az postgres flexible-server update --resource-group $RESOURCE_GROUP --name $SERVER_NAME --admin-password "new-password"`
### SSL Required Error
**Problem:** "SSL connection required"
**Solution:** Add `DATABASE_SSL=true` or `?sslmode=require` to connection string
---
## 📚 Related Documentation
- **`docs/AZURE-APP-SERVICE-DEPLOYMENT.md`** - Complete App Service deployment
- **`docs/DATABASE-RECOMMENDATION.md`** - Database comparison and recommendations
- **`docs/LOCAL-DEVELOPMENT-SETUP.md`** - Local PostgreSQL setup
---
## ✅ Checklist
- [ ] PostgreSQL Flexible Server created
- [ ] Database created (cmdb_insight)
- [ ] Firewall rules configured
- [ ] Credentials stored in Key Vault
- [ ] App Service app settings configured
- [ ] SSL enabled (`DATABASE_SSL=true`)
- [ ] Connection tested
- [ ] Monitoring configured
---
**🎉 Your PostgreSQL database is ready for production!**

View File

@@ -135,8 +135,43 @@
---
## 📞 Contact
## 📋 Deployment Stappen Overzicht
Voor vragen over de applicatie zelf, zie:
- `PRODUCTION-DEPLOYMENT.md` - Volledige deployment guide
- `AZURE-DEPLOYMENT-SUMMARY.md` - Uitgebreide Azure specifieke info
### 1. Azure Resources Aanmaken
```bash
# Resource Group
az group create --name rg-cmdb-gui --location westeurope
# App Service Plan (Basic B1)
az appservice plan create --name plan-cmdb-gui --resource-group rg-cmdb-gui --sku B1 --is-linux
# Web Apps
az webapp create --name cmdb-backend --resource-group rg-cmdb-gui --plan plan-cmdb-gui
az webapp create --name cmdb-frontend --resource-group rg-cmdb-gui --plan plan-cmdb-gui
# Key Vault
az keyvault create --name kv-cmdb-gui --resource-group rg-cmdb-gui --location westeurope
```
### 2. Database Setup
- **PostgreSQL (Aanbevolen)**: Zie `docs/AZURE-POSTGRESQL-SETUP.md`
- **SQLite**: Geen extra setup nodig (database in container)
### 3. Configuration
- Environment variabelen via App Service Configuration
- Secrets via Key Vault references
- SSL certificaat via App Service (automatisch voor *.azurewebsites.net)
### 4. CI/CD
- Azure DevOps Pipelines: Zie `docs/AZURE-PIPELINES.md`
- Automatische deployment bij push naar main branch
---
## 📞 Contact & Documentatie
Voor volledige deployment guides, zie:
- `docs/AZURE-APP-SERVICE-DEPLOYMENT.md` - Complete stap-voor-stap guide
- `docs/AZURE-POSTGRESQL-SETUP.md` - Database setup
- `docs/AZURE-PIPELINES.md` - CI/CD pipelines
- `docs/PRODUCTION-DEPLOYMENT.md` - Production best practices

View File

@@ -27,23 +27,22 @@ az postgres flexible-server create \
#### 1.2 Database Aanmaken
**Note:** The application uses a single database for all data. All tables (CMDB cache, classification history, and session state) are stored in the same database.
```sql
-- Connect to PostgreSQL
CREATE DATABASE cmdb_cache;
CREATE DATABASE cmdb_classifications;
CREATE DATABASE cmdb_insight;
-- Create user (optional, can use admin user)
CREATE USER cmdb_user WITH PASSWORD 'secure_password';
GRANT ALL PRIVILEGES ON DATABASE cmdb_cache TO cmdb_user;
GRANT ALL PRIVILEGES ON DATABASE cmdb_classifications TO cmdb_user;
GRANT ALL PRIVILEGES ON DATABASE cmdb_insight TO cmdb_user;
```
#### 1.3 Connection String
```env
DATABASE_TYPE=postgres
DATABASE_URL=postgresql://cmdb_user:secure_password@<server-name>.postgres.database.azure.com:5432/cmdb_cache?sslmode=require
CLASSIFICATIONS_DATABASE_URL=postgresql://cmdb_user:secure_password@<server-name>.postgres.database.azure.com:5432/cmdb_classifications?sslmode=require
DATABASE_URL=postgresql://cmdb_user:secure_password@<server-name>.postgres.database.azure.com:5432/cmdb_insight?sslmode=require
```
### Option B: SQLite (Voor development/testing)
@@ -347,8 +346,8 @@ docker-compose logs -f backend
### 10.2 Database Monitoring
```sql
-- Database size
SELECT pg_database_size('cmdb_cache');
-- Database size (single database contains all data)
SELECT pg_database_size('cmdb_insight');
-- Table sizes
SELECT

View File

@@ -1,142 +0,0 @@
# Database Access Guide
This guide shows you how to easily access and view records in the PostgreSQL database.
## Quick Access
### Option 1: Using the Script (Easiest)
```bash
# Connect using psql
./scripts/open-database.sh psql
# Or via Docker
./scripts/open-database.sh docker
# Or get connection string for GUI tools
./scripts/open-database.sh url
```
### Option 2: Direct psql Command
```bash
# If PostgreSQL is running locally
PGPASSWORD=cmdb-dev psql -h localhost -p 5432 -U cmdb -d cmdb
```
### Option 3: Via Docker
```bash
# Connect to PostgreSQL container
docker exec -it $(docker ps | grep postgres | awk '{print $1}') psql -U cmdb -d cmdb
```
## Connection Details
From `docker-compose.yml`:
- **Host**: localhost (or `postgres` if connecting from Docker network)
- **Port**: 5432
- **Database**: cmdb
- **User**: cmdb
- **Password**: cmdb-dev
**Connection String:**
```
postgresql://cmdb:cmdb-dev@localhost:5432/cmdb
```
## GUI Tools
### pgAdmin (Free, Web-based)
1. Download from: https://www.pgadmin.org/download/
2. Add new server with connection details above
3. Browse tables and run queries
### DBeaver (Free, Cross-platform)
1. Download from: https://dbeaver.io/download/
2. Create new PostgreSQL connection
3. Use connection string or individual fields
### TablePlus (macOS, Paid but has free tier)
1. Download from: https://tableplus.com/
2. Create new PostgreSQL connection
3. Enter connection details
### DataGrip (JetBrains, Paid)
1. Part of JetBrains IDEs or standalone
2. Create new PostgreSQL data source
3. Use connection string
## Useful SQL Commands
Once connected, try these commands:
```sql
-- List all tables
\dt
-- Describe a table structure
\d users
\d classifications
\d cache_objects
-- View all users
SELECT * FROM users;
-- View classifications
SELECT * FROM classifications ORDER BY created_at DESC LIMIT 10;
-- View cached objects
SELECT object_key, object_type, updated_at FROM cache_objects ORDER BY updated_at DESC LIMIT 20;
-- Count records per table
SELECT
'users' as table_name, COUNT(*) as count FROM users
UNION ALL
SELECT
'classifications', COUNT(*) FROM classifications
UNION ALL
SELECT
'cache_objects', COUNT(*) FROM cache_objects;
-- View user settings
SELECT u.username, u.email, us.ai_provider, us.ai_enabled
FROM users u
LEFT JOIN user_settings us ON u.id = us.user_id;
```
## Environment Variables
If you're using environment variables instead of Docker:
```bash
# Check your .env file for:
DATABASE_URL=postgresql://cmdb:cmdb-dev@localhost:5432/cmdb
# or
DATABASE_TYPE=postgres
DATABASE_HOST=localhost
DATABASE_PORT=5432
DATABASE_NAME=cmdb
DATABASE_USER=cmdb
DATABASE_PASSWORD=cmdb-dev
```
## Troubleshooting
### Database not running
```bash
# Start PostgreSQL container
docker-compose up -d postgres
# Check if it's running
docker ps | grep postgres
```
### Connection refused
- Make sure PostgreSQL container is running
- Check if port 5432 is already in use
- Verify connection details match docker-compose.yml
### Permission denied
- Verify username and password match docker-compose.yml
- Check if user has access to the database

View File

@@ -1,211 +0,0 @@
# Database-Driven Schema Implementation Plan
## Overzicht
Dit plan beschrijft de migratie van statische schema files naar een volledig database-driven aanpak waarbij:
1. Schema wordt dynamisch opgehaald van Jira Assets API
2. Schema wordt opgeslagen in PostgreSQL database
3. Datamodel en datavalidatie pagina's worden opgebouwd vanuit database
4. TypeScript types worden gegenereerd vanuit database (handmatig)
## Architectuur
```
┌─────────────────┐
│ Jira Assets API │ (Authoritative Source)
└────────┬────────┘
│ Schema Discovery
┌─────────────────┐
│ Schema Discovery│ (Jira API → Database)
│ Service │
└────────┬────────┘
│ Store in DB
┌─────────────────┐
│ PostgreSQL DB │ (Cached Schema)
│ - object_types │
│ - attributes │
└────────┬────────┘
│ Serve to Frontend
┌─────────────────┐
│ API Endpoints │ (/api/schema)
└────────┬────────┘
│ Code Generation
┌─────────────────┐
│ TypeScript Types │ (Handmatig gegenereerd)
└─────────────────┘
```
## Database Schema
De database heeft al de benodigde tabellen in `normalized-schema.ts`:
- `object_types` - Object type definities
- `attributes` - Attribute definities per object type
**Geen extra tabellen nodig!** We gebruiken de bestaande structuur.
## Implementatie Stappen
### Stap 1: Schema Discovery Service Aanpassen ✅
**Huidige situatie:**
- `schemaDiscoveryService` haalt data uit statische `OBJECT_TYPES` file
**Nieuwe situatie:**
- `schemaDiscoveryService` haalt schema direct van Jira Assets API
- Gebruikt `JiraSchemaFetcher` logica (uit `generate-schema.ts`)
- Slaat schema op in database tabellen
**Bestanden:**
- `backend/src/services/schemaDiscoveryService.ts` - Aanpassen om API calls te maken
### Stap 2: Schema Cache Service ✅
**Nieuwe service:**
- In-memory cache met 5 minuten TTL
- Cache invalidation bij schema updates
- Snelle response voor `/api/schema` endpoint
**Bestanden:**
- `backend/src/services/schemaCacheService.ts` - Nieuw bestand
### Stap 3: Schema API Endpoint Migreren ✅
**Huidige situatie:**
- `/api/schema` endpoint leest van statische `OBJECT_TYPES` file
**Nieuwe situatie:**
- `/api/schema` endpoint leest van database (via cache)
- Gebruikt `schemaCacheService` voor performance
**Bestanden:**
- `backend/src/routes/schema.ts` - Aanpassen om database te gebruiken
### Stap 4: Code Generation Script ✅
**Nieuwe functionaliteit:**
- Script dat database schema → TypeScript types genereert
- Handmatig uitvoerbaar via CLI command
- Genereert: `jira-schema.ts`, `jira-types.ts`
**Bestanden:**
- `backend/scripts/generate-types-from-db.ts` - Nieuw bestand
- `package.json` - NPM script toevoegen
### Stap 5: Datavalidatie Pagina Migreren ✅
**Huidige situatie:**
- Gebruikt mogelijk statische schema files
**Nieuwe situatie:**
- Volledig database-driven
- Gebruikt `schemaDiscoveryService` voor schema data
**Bestanden:**
- `backend/src/routes/dataValidation.ts` - Controleren en aanpassen indien nodig
### Stap 6: Database Indexes ✅
**Toevoegen:**
- Indexes voor snelle schema queries
- Performance optimalisatie
**Bestanden:**
- `backend/src/services/database/normalized-schema.ts` - Indexes toevoegen
### Stap 7: CLI Command voor Schema Discovery ✅
**Nieuwe functionaliteit:**
- Handmatige trigger voor schema discovery
- Bijvoorbeeld: `npm run discover-schema`
**Bestanden:**
- `backend/scripts/discover-schema.ts` - Nieuw bestand
- `package.json` - NPM script toevoegen
## API Endpoints
### GET /api/schema
**Huidig:** Leest van statische files
**Nieuw:** Leest van database (via cache)
**Response format:** Ongewijzigd (backward compatible)
### POST /api/schema/discover (Nieuw)
**Functionaliteit:** Handmatige trigger voor schema discovery
**Gebruik:** Admin endpoint voor handmatige schema refresh
## Code Generation
### Script: `generate-types-from-db.ts`
**Input:** Database schema (object_types, attributes)
**Output:**
- `backend/src/generated/jira-schema.ts`
- `backend/src/generated/jira-types.ts`
**Uitvoering:** Handmatig via `npm run generate-types`
## Migratie Strategie
1. **Parallelle implementatie:** Nieuwe code naast oude code
2. **Feature flag:** Optioneel om tussen oude/nieuwe aanpak te switchen
3. **Testing:** Uitgebreide tests voor schema discovery
4. **Handmatige migratie:** Breaking changes worden handmatig opgelost
## Performance Overwegingen
- **In-memory cache:** 5 minuten TTL voor schema endpoints
- **Database indexes:** Voor snelle queries op object_types en attributes
- **Lazy loading:** Schema wordt alleen geladen wanneer nodig
## Breaking Changes
- **Geen fallback:** Als database schema niet beschikbaar is, werkt niets
- **TypeScript errors:** Bij schema wijzigingen ontstaan compile errors
- **Handmatige fix:** Developers lossen errors handmatig op
## Testing Checklist
- [ ] Schema discovery van Jira API werkt
- [ ] Schema wordt correct opgeslagen in database
- [ ] `/api/schema` endpoint retourneert database data
- [ ] Cache werkt correct (TTL, invalidation)
- [ ] Code generation script werkt
- [ ] Datamodel pagina toont database data
- [ ] Datavalidatie pagina toont database data
- [ ] Handmatige schema discovery trigger werkt
## Rollout Plan
1. **Fase 1:** Schema discovery service aanpassen (API calls)
2. **Fase 2:** Schema cache service implementeren
3. **Fase 3:** API endpoints migreren
4. **Fase 4:** Code generation script maken
5. **Fase 5:** Testing en validatie
6. **Fase 6:** Oude statische files verwijderen (na handmatige migratie)
## Risico's en Mitigatie
| Risico | Impact | Mitigatie |
|--------|--------|-----------|
| Jira API niet beschikbaar | Hoog | Geen fallback - downtime acceptabel |
| Schema wijzigingen | Medium | TypeScript errors - handmatig oplossen |
| Performance issues | Laag | Cache + indexes |
| Data migratie fouten | Medium | Uitgebreide tests |
## Success Criteria
✅ Schema wordt dynamisch opgehaald van Jira API
✅ Schema wordt opgeslagen in database
✅ Datamodel pagina toont database data
✅ Datavalidatie pagina toont database data
✅ Code generation script werkt
✅ Handmatige schema discovery werkt
✅ Performance is acceptabel (< 1s voor schema endpoint)

View File

@@ -1,197 +0,0 @@
# Database Normalisatie Voorstel
## Huidige Probleem
De huidige database structuur heeft duplicatie en is niet goed genormaliseerd:
1. **`object_types`** tabel bevat:
- `jira_type_id`, `type_name`, `display_name`, `description`, `sync_priority`, `object_count`
2. **`configured_object_types`** tabel bevat:
- `schema_id`, `schema_name`, `object_type_id`, `object_type_name`, `display_name`, `description`, `object_count`, `enabled`
**Problemen:**
- Duplicatie van `display_name`, `description`, `object_count`
- Geen expliciete relatie tussen schemas en object types
- `schema_name` wordt opgeslagen in elke object type row (niet genormaliseerd)
- Verwarring tussen `object_type_name` en `type_name`
- Twee tabellen die dezelfde informatie bevatten
## Voorgestelde Genormaliseerde Structuur
### 1. `schemas` Tabel
```sql
CREATE TABLE IF NOT EXISTS schemas (
id SERIAL PRIMARY KEY, -- Auto-increment PK
jira_schema_id TEXT NOT NULL UNIQUE, -- Jira schema ID (bijv. "6", "8")
name TEXT NOT NULL, -- Schema naam (bijv. "Application Management")
description TEXT, -- Optionele beschrijving
discovered_at TIMESTAMP NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
```
**Doel:** Centrale opslag van alle Jira Assets schemas.
### 2. `object_types` Tabel (Aangepast)
```sql
CREATE TABLE IF NOT EXISTS object_types (
id SERIAL PRIMARY KEY, -- Auto-increment PK
schema_id INTEGER NOT NULL REFERENCES schemas(id) ON DELETE CASCADE,
jira_type_id INTEGER NOT NULL, -- Jira object type ID
type_name TEXT NOT NULL UNIQUE, -- PascalCase type name (bijv. "ApplicationComponent")
display_name TEXT NOT NULL, -- Original Jira name (bijv. "Application Component")
description TEXT, -- Optionele beschrijving
sync_priority INTEGER DEFAULT 0, -- Sync prioriteit
object_count INTEGER DEFAULT 0, -- Aantal objecten in Jira
enabled BOOLEAN NOT NULL DEFAULT FALSE, -- KEY CHANGE: enabled flag hier!
discovered_at TIMESTAMP NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP NOT NULL DEFAULT NOW(),
UNIQUE(schema_id, jira_type_id) -- Een object type kan maar 1x per schema voorkomen
);
```
**Doel:** Alle object types met hun schema relatie en enabled status.
### 3. `attributes` Tabel (Ongewijzigd)
```sql
CREATE TABLE IF NOT EXISTS attributes (
id SERIAL PRIMARY KEY,
jira_attr_id INTEGER NOT NULL,
object_type_name TEXT NOT NULL REFERENCES object_types(type_name) ON DELETE CASCADE,
-- ... rest blijft hetzelfde
);
```
## Voordelen van Genormaliseerde Structuur
1. **Geen Duplicatie:**
- Schema informatie staat maar 1x in `schemas` tabel
- Object type informatie staat maar 1x in `object_types` tabel
- `enabled` flag staat direct bij object type
2. **Duidelijke Relaties:**
- Foreign key `schema_id` maakt relatie expliciet
- Database constraints zorgen voor data integriteit
3. **Eenvoudigere Queries:**
```sql
-- Alle enabled object types met hun schema
SELECT ot.*, s.name as schema_name
FROM object_types ot
JOIN schemas s ON ot.schema_id = s.id
WHERE ot.enabled = TRUE;
```
4. **Minder Verwarring:**
- Geen `object_type_name` vs `type_name` meer
- Geen `configured_object_types` vs `object_types` meer
- Eén bron van waarheid
5. **Eenvoudigere Migratie:**
- `configured_object_types` kan worden verwijderd
- Data kan worden gemigreerd naar nieuwe structuur
## Migratie Plan
1. **Nieuwe Tabellen Aanmaken:**
- `schemas` tabel
- `object_types` tabel aanpassen (toevoegen `schema_id`, `enabled`)
2. **Data Migreren:**
- Unieke schemas uit `configured_object_types` naar `schemas`
- Object types uit `configured_object_types` naar `object_types` met juiste `schema_id` FK
- `enabled` flag overnemen
3. **Foreign Keys Aanpassen:**
- `attributes.object_type_name` blijft verwijzen naar `object_types.type_name`
- `objects.object_type_name` blijft verwijzen naar `object_types.type_name`
4. **Code Aanpassen:**
- `schemaConfigurationService` aanpassen voor nieuwe structuur
- `schemaDiscoveryService` aanpassen voor nieuwe structuur
- `schemaCacheService` aanpassen voor JOIN met `schemas`
5. **Oude Tabel Verwijderen:**
- `configured_object_types` tabel verwijderen na migratie
## Impact op Bestaande Code
### Services die aangepast moeten worden:
1. **`schemaConfigurationService.ts`:**
- `discoverAndStoreSchemasAndObjectTypes()` - eerst schemas opslaan, dan object types
- `getConfiguredObjectTypes()` - JOIN met schemas
- `setObjectTypeEnabled()` - direct op object_types.enabled
- `getEnabledObjectTypes()` - WHERE enabled = TRUE
2. **`schemaDiscoveryService.ts`:**
- Moet ook schemas en object types in nieuwe structuur opslaan
- Moet `enabled` flag respecteren
3. **`schemaCacheService.ts`:**
- `fetchFromDatabase()` - JOIN met schemas voor schema naam
- Filter op `object_types.enabled = TRUE`
4. **`syncEngine.ts`:**
- Gebruikt al `getEnabledObjectTypes()` - blijft werken na aanpassing service
## SQL Migratie Script
```sql
-- Stap 1: Maak schemas tabel
CREATE TABLE IF NOT EXISTS schemas (
id SERIAL PRIMARY KEY,
jira_schema_id TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
description TEXT,
discovered_at TIMESTAMP NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
-- Stap 2: Voeg schema_id en enabled toe aan object_types
ALTER TABLE object_types
ADD COLUMN IF NOT EXISTS schema_id INTEGER REFERENCES schemas(id) ON DELETE CASCADE,
ADD COLUMN IF NOT EXISTS enabled BOOLEAN NOT NULL DEFAULT FALSE;
-- Stap 3: Migreer data
-- Eerst: unieke schemas
INSERT INTO schemas (jira_schema_id, name, description, discovered_at, updated_at)
SELECT DISTINCT
schema_id as jira_schema_id,
schema_name as name,
NULL as description,
MIN(discovered_at) as discovered_at,
MAX(updated_at) as updated_at
FROM configured_object_types
GROUP BY schema_id, schema_name
ON CONFLICT(jira_schema_id) DO NOTHING;
-- Dan: object types met schema_id FK
UPDATE object_types ot
SET
schema_id = s.id,
enabled = COALESCE(
(SELECT enabled FROM configured_object_types cot
WHERE cot.object_type_id = ot.jira_type_id
AND cot.schema_id = s.jira_schema_id
LIMIT 1),
FALSE
)
FROM schemas s
WHERE EXISTS (
SELECT 1 FROM configured_object_types cot
WHERE cot.object_type_id = ot.jira_type_id
AND cot.schema_id = s.jira_schema_id
);
-- Stap 4: Verwijder oude tabel (na verificatie)
-- DROP TABLE IF EXISTS configured_object_types;
```
## Conclusie
De genormaliseerde structuur is veel cleaner, elimineert duplicatie, en maakt queries eenvoudiger. De `enabled` flag staat nu direct bij het object type, wat logischer is.

View File

@@ -3,8 +3,9 @@
## Huidige Situatie
De applicatie gebruikt momenteel **SQLite** via `better-sqlite3`:
- **cmdb-cache.db**: ~20MB - CMDB object cache
- **classifications.db**: Classification history
- **cmdb-cache.db**: ~20MB - Alle data (CMDB object cache, classification history, session state)
**Note:** All data (cache, classifications, session state) is stored in a single database file.
## Aanbeveling: PostgreSQL

View File

@@ -129,7 +129,7 @@ If you want to clear data but keep the database structure:
```bash
# Connect to database
docker-compose exec postgres psql -U cmdb -d cmdb_cache
docker-compose exec postgres psql -U cmdb -d cmdb_insight
# Clear all data (keeps schema)
TRUNCATE TABLE attribute_values CASCADE;
@@ -175,7 +175,7 @@ EOF
- ✅ All relations (`object_relations` table)
- ❌ Schema cache (kept for faster schema discovery)
- ❌ Schema mappings (kept for configuration)
- ❌ User data and classifications (separate database)
- ❌ User data and classifications (stored in same database, but not cleared by cache clear)
### Cleared by Volume Reset:
- ✅ Everything above
@@ -189,7 +189,7 @@ After reset, verify the database is empty and ready for rebuild:
```bash
# Check object counts (should be 0)
docker-compose exec postgres psql -U cmdb -d cmdb_cache -c "
docker-compose exec postgres psql -U cmdb -d cmdb_insight -c "
SELECT
(SELECT COUNT(*) FROM objects) as objects,
(SELECT COUNT(*) FROM attribute_values) as attributes,

View File

@@ -1,635 +0,0 @@
# Deployment Advies - CMDB Insight 🎯
**Datum:** {{ vandaag }}
**Aanbeveling:** Azure App Service (Basic Tier)
**Geschatte kosten:** ~€20-25/maand
---
## 📊 Analyse van Jouw Situatie
### Jouw Requirements:
- ✅ **Managed service** (geen serverbeheer) - **Jouw voorkeur**
- ✅ **Interne productie** (niet bedrijfskritisch)
- ✅ **20 gebruikers** (kleine team)
- ✅ **Downtime acceptabel** (kan zelfs 's avonds/weekend uit)
- ✅ **Budget geen probleem** (~€20-25/maand is prima)
- ✅ **Monitoring via Elastic stack** (kan geïntegreerd worden)
- ✅ **NEN 7510 compliance** (vereist)
- ✅ **Updates:** Initieel dagelijks, daarna wekelijks/maandelijks
### Waarom Azure App Service Perfect Past:
1. **Managed Service**
- Geen serverbeheer, SSH, Linux configuratie
- Azure beheert alles (updates, security patches, scaling)
- Perfect voor jouw voorkeur: "liever niet als het niet hoeft"
2. **Eenvoudig & Snel**
- Setup in ~15 minuten
- Automatische SSL/TLS certificaten
- Integratie met Azure DevOps (je pipeline werkt al!)
3. **Kosten-Effectief**
- Basic B1 plan: ~€15-25/maand
- Voldoende voor 20 gebruikers
- Geen verborgen kosten
4. **Flexibel**
- Deployment slots voor testen (staging → productie)
- Eenvoudige rollback
- Integratie met Azure Key Vault voor secrets
5. **Monitoring & Compliance**
- Integratie met Azure Monitor → kan naar Elastic stack
- Logging en audit trails (NEN 7510 compliance)
- Health checks ingebouwd
---
## 🚀 Aanbevolen Architectuur
```
┌─────────────────────────────────────────┐
│ Azure App Service Plan (B1) │
│ │
│ ┌──────────────┐ ┌──────────────┐ │
│ │ Frontend │ │ Backend │ │
│ │ Web App │ │ Web App │ │
│ │ (Container) │ │ (Container) │ │
│ └──────────────┘ └──────┬───────┘ │
└──────────────────────────┼─────────────┘
┌──────────────┼──────────────┐
│ │ │
┌───────▼──────┐ ┌─────▼──────┐ ┌────▼─────┐
│ Azure Key │ │ Azure │ │ Elastic │
│ Vault │ │ Monitor │ │ Stack │
│ (Secrets) │ │ (Logs) │ │ (Export) │
└──────────────┘ └────────────┘ └──────────┘
```
**Componenten:**
- **App Service Plan B1**: 1 vCPU, 1.75GB RAM (voldoende voor 20 gebruikers)
- **2 Web Apps**: Frontend + Backend (delen dezelfde plan = kostenbesparend)
- **Azure Key Vault**: Voor secrets (Jira credentials, session secrets)
- **Azure Monitor**: Logging → kan geëxporteerd worden naar Elastic stack
- **Azure Storage**: Voor SQLite database (als je SQLite blijft gebruiken)
**Kosten Breakdown:**
- App Service Plan B1: ~€15-20/maand
- Azure Key Vault: ~€1-2/maand
- Azure Storage (SQLite): ~€1-2/maand
- **Totaal: ~€17-24/maand**
---
## 📋 Stap-voor-Stap Deployment Plan
### Fase 1: Basis Setup (15 minuten)
#### Stap 1.1: Resource Group Aanmaken
```bash
az group create \
--name rg-cmdb-gui-prod \
--location westeurope
```
#### Stap 1.2: App Service Plan Aanmaken
```bash
az appservice plan create \
--name plan-cmdb-gui-prod \
--resource-group rg-cmdb-gui-prod \
--sku B1 \
--is-linux
```
**Waarom B1?**
- 1 vCPU, 1.75GB RAM
- Voldoende voor 20 gebruikers
- Goede prijs/prestatie verhouding
#### Stap 1.3: Web Apps Aanmaken
```bash
# Backend Web App
az webapp create \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--plan plan-cmdb-gui-prod \
--deployment-container-image-name zdlas.azurecr.io/cmdb-insight/backend:latest
# Frontend Web App
az webapp create \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--plan plan-cmdb-gui-prod \
--deployment-container-image-name zdlas.azurecr.io/cmdb-insight/frontend:latest
```
---
### Fase 2: ACR Authentication (5 minuten)
#### Stap 2.1: Enable Managed Identity
```bash
# Backend
az webapp identity assign \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod
# Frontend
az webapp identity assign \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod
```
#### Stap 2.2: Grant ACR Pull Permissions
```bash
# Get managed identity principal ID
BACKEND_PRINCIPAL_ID=$(az webapp identity show \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--query principalId -o tsv)
FRONTEND_PRINCIPAL_ID=$(az webapp identity show \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--query principalId -o tsv)
# Get ACR resource ID
ACR_ID=$(az acr show \
--name zdlas \
--resource-group <acr-resource-group> \
--query id -o tsv)
# Grant AcrPull role
az role assignment create \
--assignee $BACKEND_PRINCIPAL_ID \
--role AcrPull \
--scope $ACR_ID
az role assignment create \
--assignee $FRONTEND_PRINCIPAL_ID \
--role AcrPull \
--scope $ACR_ID
```
#### Stap 2.3: Configure Container Settings
```bash
# Backend
az webapp config container set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--docker-custom-image-name zdlas.azurecr.io/cmdb-insight/backend:latest \
--docker-registry-server-url https://zdlas.azurecr.io
# Frontend
az webapp config container set \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--docker-custom-image-name zdlas.azurecr.io/cmdb-insight/frontend:latest \
--docker-registry-server-url https://zdlas.azurecr.io
```
---
### Fase 3: Environment Variabelen (10 minuten)
#### Stap 3.1: Azure Key Vault Aanmaken
```bash
az keyvault create \
--name kv-cmdb-gui-prod \
--resource-group rg-cmdb-gui-prod \
--location westeurope \
--sku standard
```
#### Stap 3.2: Secrets Toevoegen aan Key Vault
```bash
# Jira Personal Access Token (of OAuth credentials)
az keyvault secret set \
--vault-name kv-cmdb-gui-prod \
--name JiraPat \
--value "your-jira-pat-token"
# Session Secret
az keyvault secret set \
--vault-name kv-cmdb-gui-prod \
--name SessionSecret \
--value "$(openssl rand -hex 32)"
# Jira Schema ID
az keyvault secret set \
--vault-name kv-cmdb-gui-prod \
--name JiraSchemaId \
--value "your-schema-id"
# Anthropic API Key (optioneel)
az keyvault secret set \
--vault-name kv-cmdb-gui-prod \
--name AnthropicApiKey \
--value "your-anthropic-key"
```
#### Stap 3.3: Grant Web Apps Access tot Key Vault
```bash
# Backend
az keyvault set-policy \
--name kv-cmdb-gui-prod \
--object-id $BACKEND_PRINCIPAL_ID \
--secret-permissions get list
# Frontend (als nodig)
az keyvault set-policy \
--name kv-cmdb-gui-prod \
--object-id $FRONTEND_PRINCIPAL_ID \
--secret-permissions get list
```
#### Stap 3.4: Configure App Settings met Key Vault References
```bash
# Backend App Settings
az webapp config appsettings set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--settings \
NODE_ENV=production \
PORT=3001 \
JIRA_BASE_URL=https://jira.zuyderland.nl \
JIRA_SCHEMA_ID="@Microsoft.KeyVault(SecretUri=https://kv-cmdb-gui-prod.vault.azure.net/secrets/JiraSchemaId/)" \
JIRA_PAT="@Microsoft.KeyVault(SecretUri=https://kv-cmdb-gui-prod.vault.azure.net/secrets/JiraPat/)" \
SESSION_SECRET="@Microsoft.KeyVault(SecretUri=https://kv-cmdb-gui-prod.vault.azure.net/secrets/SessionSecret/)" \
ANTHROPIC_API_KEY="@Microsoft.KeyVault(SecretUri=https://kv-cmdb-gui-prod.vault.azure.net/secrets/AnthropicApiKey/)" \
FRONTEND_URL=https://cmdb-frontend-prod.azurewebsites.net
# Frontend App Settings
az webapp config appsettings set \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--settings \
VITE_API_URL=https://cmdb-backend-prod.azurewebsites.net/api
```
---
### Fase 4: SSL/TLS & Domain (10 minuten)
#### Stap 4.1: Gratis SSL via App Service
App Service heeft automatisch SSL voor `*.azurewebsites.net`:
- Frontend: `https://cmdb-frontend-prod.azurewebsites.net`
- Backend: `https://cmdb-backend-prod.azurewebsites.net`
**Geen configuratie nodig!** SSL werkt automatisch.
#### Stap 4.2: Custom Domain (Optioneel - Later)
Als je later een custom domain wilt (bijv. `cmdb.zuyderland.nl`):
```bash
# Add custom domain
az webapp config hostname add \
--webapp-name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--hostname cmdb.zuyderland.nl
# Bind SSL certificate (App Service Certificate of Let's Encrypt)
az webapp config ssl bind \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui-prod \
--certificate-thumbprint <thumbprint> \
--ssl-type SNI
```
---
### Fase 5: Monitoring & Logging (15 minuten)
#### Stap 5.1: Enable Application Insights
```bash
# Create Application Insights
az monitor app-insights component create \
--app cmdb-gui-prod \
--location westeurope \
--resource-group rg-cmdb-gui-prod \
--application-type web
# Get Instrumentation Key
INSTRUMENTATION_KEY=$(az monitor app-insights component show \
--app cmdb-gui-prod \
--resource-group rg-cmdb-gui-prod \
--query instrumentationKey -o tsv)
# Configure App Settings
az webapp config appsettings set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--settings \
APPINSIGHTS_INSTRUMENTATIONKEY=$INSTRUMENTATION_KEY \
APPLICATIONINSIGHTS_CONNECTION_STRING="InstrumentationKey=$INSTRUMENTATION_KEY"
```
#### Stap 5.2: Export naar Elastic Stack (Later)
Azure Monitor kan geëxporteerd worden naar Elastic stack via:
- **Azure Monitor → Log Analytics Workspace → Export naar Elastic**
- Of gebruik **Azure Function** om logs te streamen naar Elastic
**Zie:** `docs/ELASTIC-STACK-INTEGRATION.md` (te maken als nodig)
---
### Fase 6: Test & Start (5 minuten)
#### Stap 6.1: Start Web Apps
```bash
az webapp start --name cmdb-backend-prod --resource-group rg-cmdb-gui-prod
az webapp start --name cmdb-frontend-prod --resource-group rg-cmdb-gui-prod
```
#### Stap 6.2: Test Health Endpoints
```bash
# Backend health check
curl https://cmdb-backend-prod.azurewebsites.net/api/health
# Frontend
curl https://cmdb-frontend-prod.azurewebsites.net
```
#### Stap 6.3: Check Logs
```bash
# Backend logs
az webapp log tail --name cmdb-backend-prod --resource-group rg-cmdb-gui-prod
# Frontend logs
az webapp log tail --name cmdb-frontend-prod --resource-group rg-cmdb-gui-prod
```
---
## 🔒 NEN 7510 Compliance
### Wat App Service Biedt:
1. **Encryption**
- ✅ Data at rest: Azure Storage encryption
- ✅ Data in transit: TLS 1.2+ (automatisch)
- ✅ Secrets: Azure Key Vault (encrypted)
2. **Access Control**
- ✅ Azure AD integratie (RBAC)
- ✅ Managed Identity (geen credentials in code)
- ✅ Network isolation (optioneel via VNet integration)
3. **Logging & Audit**
- ✅ Application Insights (alle API calls)
- ✅ Azure Monitor (resource logs)
- ✅ Activity Logs (wie deed wat)
- ✅ Export naar Elastic stack mogelijk
4. **Backup & Recovery**
- ✅ App Service backups (optioneel)
- ✅ Key Vault soft delete (recovery)
- ⚠️ **Opmerking**: Jouw data wordt gesynchroniseerd vanuit Jira, dus backup is minder kritisch
### Compliance Checklist:
- [ ] Secrets in Azure Key Vault (niet in code)
- [ ] HTTPS only (automatisch via App Service)
- [ ] Logging ingeschakeld (Application Insights)
- [ ] Access control (Azure AD RBAC)
- [ ] Audit trail (Activity Logs)
- [ ] Encryption at rest (Azure Storage)
- [ ] Encryption in transit (TLS 1.2+)
**Zie:** `docs/NEN-7510-COMPLIANCE.md` (te maken als nodig)
---
## 🔐 VPN/Private Network Opties (Voor Later)
### Optie 1: App Service VNet Integration
**Wat het doet:**
- Web App verbindt met Azure Virtual Network
- Toegang tot resources in VNet (bijv. database, andere services)
- **Niet**: Maakt de app niet privé (app blijft publiek bereikbaar)
**Wanneer gebruiken:**
- Als je een database in VNet hebt
- Als je andere Azure services in VNet moet bereiken
**Setup:**
```bash
# Create VNet (als nog niet bestaat)
az network vnet create \
--name vnet-cmdb-gui \
--resource-group rg-cmdb-gui-prod \
--address-prefix 10.0.0.0/16
# Create subnet
az network vnet subnet create \
--name subnet-app-service \
--resource-group rg-cmdb-gui-prod \
--vnet-name vnet-cmdb-gui \
--address-prefix 10.0.1.0/24
# Integrate Web App with VNet
az webapp vnet-integration add \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--vnet vnet-cmdb-gui \
--subnet subnet-app-service
```
### Optie 2: Private Endpoint (Maakt App Privé)
**Wat het doet:**
- Maakt de Web App alleen bereikbaar via private IP
- Vereist VPN/ExpressRoute om toegang te krijgen
- **Kosten:** ~€7-10/maand per Private Endpoint
**Wanneer gebruiken:**
- Als je de app alleen via VPN wilt bereiken
- Als je geen publieke toegang wilt
**Setup:**
```bash
# Create Private Endpoint
az network private-endpoint create \
--name pe-cmdb-backend \
--resource-group rg-cmdb-gui-prod \
--vnet-name vnet-cmdb-gui \
--subnet subnet-private-endpoint \
--private-connection-resource-id /subscriptions/<sub-id>/resourceGroups/rg-cmdb-gui-prod/providers/Microsoft.Web/sites/cmdb-backend-prod \
--group-id sites \
--connection-name pe-connection-backend
```
### Optie 3: App Service Environment (ASE) - Enterprise
**Wat het doet:**
- Volledig geïsoleerde App Service omgeving
- Alleen bereikbaar via VNet
- **Kosten:** ~€1000+/maand (te duur voor jouw use case)
**Niet aanbevolen** voor jouw situatie (te duur, overkill).
---
## 🔄 Updates Deployen
### Automatische Deployment (Via Pipeline)
Je Azure DevOps pipeline bouwt al automatisch images. Voor automatische deployment:
#### Optie A: Continuous Deployment (Aanbevolen)
```bash
# Enable continuous deployment
az webapp deployment container config \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--enable-cd true
# Configure deployment slot (staging)
az webapp deployment slot create \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--slot staging
# Swap staging → production (zero-downtime)
az webapp deployment slot swap \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--slot staging \
--target-slot production
```
#### Optie B: Manual Deployment (Eenvoudig)
```bash
# Pull nieuwe image en restart
az webapp restart --name cmdb-backend-prod --resource-group rg-cmdb-gui-prod
az webapp restart --name cmdb-frontend-prod --resource-group rg-cmdb-gui-prod
```
**Workflow:**
1. Push code naar `main` branch
2. Pipeline bouwt nieuwe images → `zdlas.azurecr.io/.../backend:88767`
3. Images worden getagged als `latest`
4. Restart Web Apps → pull nieuwe `latest` image
---
## 📊 Monitoring Setup
### Azure Monitor → Elastic Stack Export
**Optie 1: Log Analytics Workspace Export**
```bash
# Create Log Analytics Workspace
az monitor log-analytics workspace create \
--workspace-name law-cmdb-gui-prod \
--resource-group rg-cmdb-gui-prod \
--location westeurope
# Configure App Service to send logs
az webapp log config \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui-prod \
--application-logging filesystem \
--detailed-error-messages true \
--failed-request-tracing true \
--web-server-logging filesystem
# Export to Elastic (via Azure Function of Event Hub)
# Zie: docs/ELASTIC-STACK-INTEGRATION.md
```
**Optie 2: Application Insights → Elastic**
Application Insights kan geëxporteerd worden via:
- **Continuous Export** (deprecated, maar werkt nog)
- **Azure Function** die Application Insights API gebruikt
- **Log Analytics Workspace** → Export naar Elastic
---
## ✅ Deployment Checklist
### Pre-Deployment:
- [ ] Resource Group aangemaakt
- [ ] App Service Plan aangemaakt (B1)
- [ ] Web Apps aangemaakt (backend + frontend)
- [ ] ACR authentication geconfigureerd
- [ ] Key Vault aangemaakt
- [ ] Secrets toegevoegd aan Key Vault
- [ ] App Settings geconfigureerd (met Key Vault references)
- [ ] Application Insights ingeschakeld
### Post-Deployment:
- [ ] Health checks werken
- [ ] SSL/TLS werkt (automatisch)
- [ ] Logging werkt
- [ ] Monitoring ingesteld
- [ ] Team geïnformeerd over URLs
- [ ] Documentatie bijgewerkt
---
## 🎯 Volgende Stappen
1. **Start met Fase 1-3** (Basis setup + ACR + Environment variabelen)
2. **Test de applicatie** (Fase 6)
3. **Configureer monitoring** (Fase 5)
4. **Documenteer voor team** (URLs, credentials, etc.)
---
## 📚 Gerelateerde Documentatie
- **Quick Deployment Guide**: `docs/QUICK-DEPLOYMENT-GUIDE.md`
- **Production Deployment**: `docs/PRODUCTION-DEPLOYMENT.md`
- **Azure Deployment Summary**: `docs/AZURE-DEPLOYMENT-SUMMARY.md`
---
## ❓ Vragen?
**Veelgestelde vragen:**
**Q: Moet ik PostgreSQL gebruiken of kan ik SQLite houden?**
A: SQLite is prima voor 20 gebruikers. Als je later groeit, kun je migreren naar PostgreSQL.
**Q: Hoe update ik de applicatie?**
A: Push naar `main` → Pipeline bouwt images → Restart Web Apps (of gebruik deployment slots voor zero-downtime).
**Q: Kan ik de app 's avonds/weekend uitzetten?**
A: Ja! `az webapp stop --name cmdb-backend-prod --resource-group rg-cmdb-gui-prod` (bespaart kosten).
**Q: Hoe integreer ik met Elastic stack?**
A: Exporteer Azure Monitor logs via Log Analytics Workspace → Elastic (zie Fase 5).
---
## 🎉 Success!
Je hebt nu een compleet deployment plan voor Azure App Service!
**Start met Fase 1** en laat me weten als je hulp nodig hebt bij een specifieke stap.

View File

@@ -1,324 +0,0 @@
# Deployment Next Steps - Images Gereed! 🚀
Je Docker images zijn succesvol gebouwd en gepusht naar Azure Container Registry! Hier zijn de volgende stappen voor deployment.
## ✅ Wat is al klaar:
- ✅ Azure Container Registry (ACR): `zdlas.azurecr.io`
- ✅ Docker images gebouwd en gepusht:
- `zdlas.azurecr.io/cmdb-insight/backend:latest`
- `zdlas.azurecr.io/cmdb-insight/frontend:latest`
- ✅ Azure DevOps Pipeline: Automatische builds bij push naar `main`
- ✅ Docker Compose configuratie: `docker-compose.prod.acr.yml`
---
## 🎯 Deployment Opties
Je hebt verschillende opties om de applicatie te deployen. Kies de optie die het beste past bij jouw situatie:
### Optie 1: Azure App Service (Aanbevolen voor productie) ⭐
**Voordelen:**
- Managed service (geen server management)
- Automatische scaling
- Ingebouwde SSL/TLS
- Deployment slots (zero-downtime updates)
- Integratie met Azure Key Vault
- Monitoring via Application Insights
**Geschikt voor:** Productie deployment, kleine tot middelgrote teams
**Kosten:** ~€15-25/maand (Basic B1 plan)
**Stappen:**
1. Maak App Service Plan aan
2. Maak 2 Web Apps aan (backend + frontend)
3. Configureer container deployment vanuit ACR
4. Stel environment variabelen in
5. Configureer SSL certificaat
**Zie:** `docs/AZURE-APP-SERVICE-DEPLOYMENT.md` (te maken)
---
### Optie 2: Azure Container Instances (ACI) - Eenvoudig
**Voordelen:**
- Snel op te zetten
- Pay-per-use pricing
- Geen server management
**Geschikt voor:** Test/development, kleine deployments
**Kosten:** ~€30-50/maand (2 containers)
**Stappen:**
1. Maak 2 Container Instances aan
2. Pull images vanuit ACR
3. Configureer environment variabelen
4. Stel networking in
**Zie:** `docs/AZURE-CONTAINER-INSTANCES-DEPLOYMENT.md` (te maken)
---
### Optie 3: VM met Docker Compose (Flexibel)
**Voordelen:**
- Volledige controle
- Eenvoudige deployment met Docker Compose
- Kan lokaal getest worden
**Geschikt voor:** Als je al een VM hebt, of volledige controle wilt
**Kosten:** ~€20-40/maand (Basic VM)
**Stappen:**
1. Maak Azure VM aan (Ubuntu)
2. Installeer Docker en Docker Compose
3. Login naar ACR
4. Gebruik `docker-compose.prod.acr.yml`
5. Configureer Nginx reverse proxy
**Zie:** `docs/VM-DOCKER-COMPOSE-DEPLOYMENT.md` (te maken)
---
### Optie 4: Azure Kubernetes Service (AKS) - Enterprise
**Voordelen:**
- Enterprise-grade scaling
- High availability
- Advanced networking
**Geschikt voor:** Grote deployments, enterprise requirements
**Kosten:** ~€50-100+/maand (minimaal 2 nodes)
**Niet aanbevolen voor:** Kleine teams (20 gebruikers) - overkill
---
## 🔍 Stap 1: Verifieer Images in ACR
**Controleer of de images succesvol zijn gepusht:**
```bash
# Login naar ACR
az acr login --name zdlas
# List repositories
az acr repository list --name zdlas --output table
# List tags voor backend
az acr repository show-tags --name zdlas --repository cmdb-insight/backend --output table
# List tags voor frontend
az acr repository show-tags --name zdlas --repository cmdb-insight/frontend --output table
```
**Verwachte output:**
```
REPOSITORY TAG CREATED
cmdb-insight/backend latest ...
cmdb-insight/backend 88764 ...
cmdb-insight/frontend latest ...
cmdb-insight/frontend 88764 ...
```
---
## 📋 Stap 2: Update Docker Compose voor ACR
**Update `docker-compose.prod.acr.yml` met de juiste ACR naam:**
```yaml
services:
backend:
image: zdlas.azurecr.io/cmdb-insight/backend:latest
frontend:
image: zdlas.azurecr.io/cmdb-insight/frontend:latest
```
**Let op:** De huidige configuratie gebruikt `zuyderlandcmdbacr.azurecr.io` - pas dit aan naar `zdlas.azurecr.io` als dat je ACR naam is.
---
## 🔐 Stap 3: Bereid Environment Variabelen Voor
**Maak een `.env.production` bestand** (niet committen naar Git!):
```bash
# Backend Environment Variables
NODE_ENV=production
PORT=3001
# Jira Configuration
JIRA_BASE_URL=https://jira.zuyderland.nl
JIRA_SCHEMA_ID=your-schema-id
JIRA_PAT=your-personal-access-token
# OF
JIRA_OAUTH_CLIENT_ID=your-client-id
JIRA_OAUTH_CLIENT_SECRET=your-client-secret
# Session
SESSION_SECRET=your-secure-random-secret
# AI (Optioneel)
ANTHROPIC_API_KEY=your-anthropic-key
OPENAI_API_KEY=your-openai-key
# Database (als je PostgreSQL gebruikt)
DATABASE_URL=postgresql://user:password@host:5432/dbname
# Frontend API URL
VITE_API_URL=https://your-backend-url.com/api
```
**Gebruik Azure Key Vault voor secrets in productie!**
---
## 🚀 Stap 4: Kies Deployment Methode
### Quick Start: VM met Docker Compose
**Als je snel wilt starten:**
1. **Maak Azure VM aan:**
```bash
az vm create \
--resource-group rg-cmdb-gui \
--name vm-cmdb-gui \
--image Ubuntu2204 \
--size Standard_B2s \
--admin-username azureuser \
--generate-ssh-keys
```
2. **SSH naar de VM:**
```bash
ssh azureuser@<vm-public-ip>
```
3. **Installeer Docker en Docker Compose:**
```bash
# Docker
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
sudo usermod -aG docker $USER
# Docker Compose
sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
```
4. **Login naar ACR:**
```bash
az acr login --name zdlas
# OF
docker login zdlas.azurecr.io -u <acr-username> -p <acr-password>
```
5. **Clone repository en deploy:**
```bash
git clone <your-repo-url>
cd cmdb-insight
# Update docker-compose.prod.acr.yml met juiste ACR naam
# Maak .env.production aan
docker-compose -f docker-compose.prod.acr.yml up -d
```
---
## 📝 Stap 5: Configureer Nginx Reverse Proxy
**Update `nginx/nginx.conf`** voor productie:
- SSL/TLS certificaat configuratie
- Domain name
- Backend API proxy
- Frontend static files
**Zie:** `nginx/nginx.conf` voor configuratie template.
---
## 🔒 Stap 6: Security Checklist
- [ ] SSL/TLS certificaat geconfigureerd
- [ ] Environment variabelen in Key Vault (niet in code)
- [ ] Firewall rules geconfigureerd
- [ ] CORS correct geconfigureerd
- [ ] Rate limiting ingeschakeld
- [ ] Health checks geconfigureerd
- [ ] Monitoring/alerting ingesteld
---
## 📊 Stap 7: Monitoring Setup
**Azure Application Insights:**
- Application performance monitoring
- Error tracking
- Usage analytics
**Azure Monitor:**
- Container health
- Resource usage
- Alerts
---
## 🎯 Aanbevolen Volgorde
1. **Verifieer images in ACR** (Stap 1)
2. **Kies deployment optie** (Optie 1, 2, of 3)
3. **Bereid environment variabelen voor** (Stap 3)
4. **Deploy naar test environment**
5. **Test functionaliteit**
6. **Configureer SSL/TLS**
7. **Setup monitoring**
8. **Deploy naar productie**
---
## 📚 Gerelateerde Documentatie
- **Azure App Service Deployment**: `docs/AZURE-APP-SERVICE-DEPLOYMENT.md` (te maken)
- **Azure Container Instances**: `docs/AZURE-CONTAINER-INSTANCES-DEPLOYMENT.md` (te maken)
- **VM Docker Compose**: `docs/VM-DOCKER-COMPOSE-DEPLOYMENT.md` (te maken)
- **Production Deployment**: `docs/PRODUCTION-DEPLOYMENT.md`
- **Azure Deployment Summary**: `docs/AZURE-DEPLOYMENT-SUMMARY.md`
---
## ❓ Vragen?
**Veelgestelde vragen:**
**Q: Welke deployment optie moet ik kiezen?**
A: Voor 20 gebruikers: **Azure App Service** (Optie 1) is het meest geschikt - managed service, eenvoudig, voldoende resources.
**Q: Moet ik PostgreSQL gebruiken of kan ik SQLite houden?**
A: SQLite is prima voor 20 gebruikers. PostgreSQL is beter voor groei of als je connection pooling nodig hebt.
**Q: Hoe configureer ik SSL?**
A: Azure App Service heeft ingebouwde SSL. Voor VM: gebruik Let's Encrypt met certbot.
**Q: Hoe update ik de applicatie?**
A: Push naar `main` branch → Pipeline bouwt nieuwe images → Pull nieuwe images in deployment → Restart containers.
---
## 🎉 Success!
Je hebt nu:
- ✅ Docker images in ACR
- ✅ Automatische CI/CD pipeline
- ✅ Deployment configuratie klaar
**Volgende stap:** Kies je deployment optie en volg de stappen!

View File

@@ -0,0 +1,168 @@
# Documentation Consolidation - Completed ✅
## Overview
This document outlines the consolidation work completed to reduce redundancy and improve maintainability.
**Status**: ✅ **COMPLETED**
## Documents to DELETE (Outdated/Status/One-time)
1. **REFACTOR-PHASE-2B-3-STATUS.md** - Implementation complete, historical status
2. **AUTHENTICATION-IMPLEMENTATION-STATUS.md** - Implementation complete, historical status
3. **DATABASE-NORMALIZATION-PROPOSAL.md** - Proposal, already implemented
4. **DATABASE-DRIVEN-SCHEMA-IMPLEMENTATION-PLAN.md** - Planning doc, already implemented
5. **NEXT-STEPS-ACR-CREATED.md** - Temporary next steps, now in main guides
6. **AZURE-REGISTRY-BESLISSING.md** - Decision doc, decision already made
7. **refactor-plan.md** - Old refactor plan, superseded
8. **JIRA-ASSETS-SYNC-REFACTOR-PLAN-2025-01-21** - Old refactor plan, superseded
9. **KEY-VAULT-ACCESS-REQUEST.md** - One-time request doc
## Documents to MERGE
### Deployment Guides → Single Comprehensive Guide
- **KEEP**: `AZURE-APP-SERVICE-DEPLOYMENT.md` (most complete)
- **MERGE INTO IT**:
- `DEPLOYMENT-ADVICE.md` (analysis/advice section)
- `DEPLOYMENT-NEXT-STEPS.md` (next steps section)
- `QUICK-DEPLOYMENT-GUIDE.md` (quick start section)
- **KEEP SEPARATE**: `PRODUCTION-DEPLOYMENT.md` (general production best practices)
- **MERGE**: `AZURE-DEPLOYMENT-SUMMARY.md` + `AZURE-QUICK-REFERENCE.md``AZURE-QUICK-REFERENCE.md` (comprehensive overview)
### ACR Documentation → Single Guide
- **KEEP**: `AZURE-ACR-QUICKSTART.md` (rename to `AZURE-ACR-SETUP.md`)
- **MERGE INTO IT**:
- `AZURE-ACR-SHARED-SETUP.md` (shared setup section)
- `AZURE-CONTAINER-REGISTRY.md` (general info)
- `AZURE-ACR-PRICING.md` (pricing section)
- `AZURE-ACR-PERMISSIONS.md` (permissions section)
- **DELETE**: `AZURE-ACR-NAMING-RECOMMENDATION.md`, `AZURE-ACR-DNL-SCOPE.md` (too specific, merge key points)
### Pipeline Documentation → Single Guide
- **KEEP**: `AZURE-PIPELINES-USAGE.md` (rename to `AZURE-PIPELINES.md`)
- **MERGE INTO IT**:
- `AZURE-PIPELINE-DEPLOYMENT.md` (deployment section)
- `AZURE-PIPELINE-QUICK-REFERENCE.md` (quick reference section)
- `AZURE-PIPELINE-MSI-ERROR-FIX.md` (troubleshooting section)
- `AZURE-PIPELINE-REPO-TROUBLESHOOTING.md` (troubleshooting section)
### Database Documentation → Consolidate
- **KEEP**: `AZURE-POSTGRESQL-SETUP.md` (production setup)
- **KEEP**: `DATABASE-RECOMMENDATION.md` (decision guide)
- **KEEP**: `DATABASE-RESET-GUIDE.md` (operational guide)
- **KEEP**: `LOCAL-POSTGRES-RESET.md` (local development)
- **KEEP**: `LOCAL-DEVELOPMENT-SETUP.md` (local setup)
- **MERGE**: `DATABASE-ACCESS.md` → Into `LOCAL-DEVELOPMENT-SETUP.md`
- **KEEP**: `DATABASE-TABLES-AUDIT.md` (reference)
- **KEEP**: `NORMALIZED-DATABASE-IMPLEMENTATION-PLAN.md` (architecture reference)
- **KEEP**: `SCHEMA-DISCOVERY-FLOW.md` (architecture reference)
## Final Structure
### Core Guides (Keep)
- `AZURE-APP-SERVICE-DEPLOYMENT.md` - Complete deployment guide
- `AZURE-POSTGRESQL-SETUP.md` - Database setup
- `AZURE-ACR-SETUP.md` - Container registry (merged)
- `AZURE-PIPELINES.md` - CI/CD pipelines (merged)
- `AZURE-QUICK-REFERENCE.md` - Quick reference (merged)
- `GREEN-FIELD-DEPLOYMENT-GUIDE.md` - Green field deployment
- `PRODUCTION-DEPLOYMENT.md` - Production best practices
- `LOCAL-DEVELOPMENT-SETUP.md` - Local development
### Reference Docs (Keep)
- `AZURE-CLI-QUICKSTART.md` - Azure CLI commands
- `AZURE-DEVOPS-SETUP.md` - Azure DevOps setup
- `AZURE-NEW-SUBSCRIPTION-SETUP.md` - New subscription setup
- `AZURE-SERVICE-CONNECTION-*` - Service connection docs
- `AZURE-RESOURCES-OVERVIEW.md` - Resources overview
- Database docs (as listed above)
- `cmdb-insight-specificatie.md` - Application specification
### Operational Docs (Keep)
- `AUTHENTICATION-ENV-VARS.md` - Auth configuration
- `DOCKER-COMPOSE-WARNINGS.md` - Docker warnings
- `GITEA-DOCKER-REGISTRY.md` - Gitea registry (if used)
- `POSTGRESQL-VERSION-UPGRADE.md` - Upgrade guide
- `TYPESCRIPT-LOCAL-VS-CI.md` - TypeScript differences
- `DATA-INTEGRITY-PLAN.md` - Data integrity
## ✅ Execution Completed
### 1. ✅ Deleted Outdated/Status Docs (11 files)
- REFACTOR-PHASE-2B-3-STATUS.md
- AUTHENTICATION-IMPLEMENTATION-STATUS.md
- DATABASE-NORMALIZATION-PROPOSAL.md
- DATABASE-DRIVEN-SCHEMA-IMPLEMENTATION-PLAN.md
- NEXT-STEPS-ACR-CREATED.md
- AZURE-REGISTRY-BESLISSING.md
- refactor-plan.md
- JIRA-ASSETS-SYNC-REFACTOR-PLAN-2025-01-21
- KEY-VAULT-ACCESS-REQUEST.md
- AZURE-ACR-NAMING-RECOMMENDATION.md
- AZURE-ACR-DNL-SCOPE.md
### 2. ✅ Merged Deployment Guides (3 → 1)
- **KEPT**: `AZURE-APP-SERVICE-DEPLOYMENT.md` (enhanced with "Why Azure App Service" section)
- **DELETED**:
- DEPLOYMENT-ADVICE.md (merged into AZURE-APP-SERVICE-DEPLOYMENT.md)
- DEPLOYMENT-NEXT-STEPS.md (merged into AZURE-APP-SERVICE-DEPLOYMENT.md)
- QUICK-DEPLOYMENT-GUIDE.md (merged into AZURE-APP-SERVICE-DEPLOYMENT.md)
### 3. ✅ Consolidated ACR Documentation (6 → 1)
- **RENAMED**: `AZURE-ACR-QUICKSTART.md``AZURE-ACR-SETUP.md`
- **ENHANCED**: Added sections from:
- AZURE-ACR-SHARED-SETUP.md (shared setup)
- AZURE-ACR-PRICING.md (pricing details)
- AZURE-ACR-PERMISSIONS.md (permissions mode)
- AZURE-CONTAINER-REGISTRY.md (general info)
- **DELETED**: All redundant ACR docs
### 4. ✅ Consolidated Pipeline Documentation (5 → 1)
- **RENAMED**: `AZURE-PIPELINES-USAGE.md``AZURE-PIPELINES.md`
- **ENHANCED**: Added troubleshooting from:
- AZURE-PIPELINE-DEPLOYMENT.md (deployment section)
- AZURE-PIPELINE-QUICK-REFERENCE.md (quick reference)
- AZURE-PIPELINE-MSI-ERROR-FIX.md (MSI error troubleshooting)
- AZURE-PIPELINE-REPO-TROUBLESHOOTING.md (repository troubleshooting)
- **DELETED**: All redundant pipeline docs
### 5. ✅ Consolidated Database Documentation
- **MERGED**: `DATABASE-ACCESS.md``LOCAL-DEVELOPMENT-SETUP.md`
- **KEPT**: All other database docs (they serve different purposes)
### 6. ✅ Merged Quick References (2 → 1)
- **KEPT**: `AZURE-QUICK-REFERENCE.md` (enhanced with deployment steps)
- **DELETED**: `AZURE-DEPLOYMENT-SUMMARY.md` (merged into AZURE-QUICK-REFERENCE.md)
### 7. ✅ Updated Cross-References
- Fixed all broken references to deleted/renamed files
- Updated links in remaining documentation
## 📊 Results
**Before**: 50+ documents
**After**: ~30 documents
**Deleted**: 20+ redundant/outdated documents
**Merged**: 10+ overlapping documents into comprehensive guides
## 📁 Final Documentation Structure
### Core Deployment Guides
- `AZURE-APP-SERVICE-DEPLOYMENT.md` - Complete deployment guide
- `AZURE-POSTGRESQL-SETUP.md` - Database setup
- `AZURE-ACR-SETUP.md` - Container registry
- `AZURE-PIPELINES.md` - CI/CD pipelines
- `AZURE-QUICK-REFERENCE.md` - Quick reference
- `GREEN-FIELD-DEPLOYMENT-GUIDE.md` - Green field deployment
- `PRODUCTION-DEPLOYMENT.md` - Production best practices
### Setup & Configuration
- `AZURE-NEW-SUBSCRIPTION-SETUP.md` - New subscription setup
- `AZURE-DEVOPS-SETUP.md` - Azure DevOps setup
- `AZURE-CLI-QUICKSTART.md` - Azure CLI commands
- `LOCAL-DEVELOPMENT-SETUP.md` - Local development
### Reference & Troubleshooting
- `AZURE-SERVICE-CONNECTION-*` - Service connection docs
- `AZURE-RESOURCES-OVERVIEW.md` - Resources overview
- Database docs (recommendation, reset, etc.)
- Architecture docs (schema discovery, normalized database plan)

View File

@@ -1,634 +0,0 @@
# Cursor AI Prompt: Jira Assets Schema Synchronization
## Context
This application syncs Jira Assets (Data Center) data to a local database with a generic structure. Your task is to review, implement, and/or modify the schema synchronization feature that fetches the complete Jira Assets configuration structure.
## Objective
Implement or verify the schema sync functionality that extracts the complete Jira Assets schema structure using the REST API. This includes:
- Object Schemas
- Object Types (with hierarchy)
- Object Type Attributes (field definitions)
**Note:** This task focuses on syncing the *structure/configuration* only, not the actual object data.
---
## API Reference
### Base URL
```
{JIRA_BASE_URL}/rest/assets/1.0
```
### Authentication
- HTTP Basic Authentication (username + password/API token)
- All requests require `Accept: application/json` header
---
## Required API Endpoints & Response Structures
### 1. List All Schemas
```
GET /rest/assets/1.0/objectschema/list
```
**Response Structure:**
```json
{
"objectschemas": [
{
"id": 1,
"name": "IT Assets",
"objectSchemaKey": "IT",
"status": "Ok",
"description": "IT Asset Management Schema",
"created": "2024-01-15T10:30:00.000Z",
"updated": "2024-01-20T14:45:00.000Z",
"objectCount": 1500,
"objectTypeCount": 25
}
]
}
```
**Fields to Store:**
| Field | Type | Description |
|-------|------|-------------|
| id | integer | Primary identifier |
| name | string | Schema name |
| objectSchemaKey | string | Unique key (e.g., "IT") |
| status | string | Schema status |
| description | string | Optional description |
| created | datetime | Creation timestamp |
| updated | datetime | Last modification |
| objectCount | integer | Total objects in schema |
| objectTypeCount | integer | Total object types |
---
### 2. Get Schema Details
```
GET /rest/assets/1.0/objectschema/:id
```
**Response Structure:**
```json
{
"id": 1,
"name": "IT Assets",
"objectSchemaKey": "IT",
"status": "Ok",
"description": "IT Asset Management Schema",
"created": "2024-01-15T10:30:00.000Z",
"updated": "2024-01-20T14:45:00.000Z",
"objectCount": 1500,
"objectTypeCount": 25
}
```
---
### 3. Get Object Types (Flat List)
```
GET /rest/assets/1.0/objectschema/:id/objecttypes/flat
```
**Response Structure:**
```json
[
{
"id": 10,
"name": "Hardware",
"type": 0,
"description": "Physical hardware assets",
"icon": {
"id": 1,
"name": "Computer",
"url16": "/rest/assets/1.0/icon/1/16",
"url48": "/rest/assets/1.0/icon/1/48"
},
"position": 0,
"created": "2024-01-15T10:30:00.000Z",
"updated": "2024-01-20T14:45:00.000Z",
"objectCount": 500,
"parentObjectTypeId": null,
"objectSchemaId": 1,
"inherited": false,
"abstractObjectType": false
},
{
"id": 11,
"name": "Computer",
"type": 0,
"description": "Desktop and laptop computers",
"icon": {
"id": 2,
"name": "Laptop",
"url16": "/rest/assets/1.0/icon/2/16",
"url48": "/rest/assets/1.0/icon/2/48"
},
"position": 0,
"created": "2024-01-15T10:35:00.000Z",
"updated": "2024-01-20T14:50:00.000Z",
"objectCount": 200,
"parentObjectTypeId": 10,
"objectSchemaId": 1,
"inherited": true,
"abstractObjectType": false
}
]
```
**Fields to Store:**
| Field | Type | Description |
|-------|------|-------------|
| id | integer | Primary identifier |
| name | string | Object type name |
| type | integer | Type classification (0=normal) |
| description | string | Optional description |
| icon | object | Icon details (id, name, url16, url48) |
| position | integer | Display position in hierarchy |
| created | datetime | Creation timestamp |
| updated | datetime | Last modification |
| objectCount | integer | Number of objects of this type |
| parentObjectTypeId | integer/null | Parent type ID (null if root) |
| objectSchemaId | integer | Parent schema ID |
| inherited | boolean | Whether attributes are inherited |
| abstractObjectType | boolean | Whether type is abstract (no direct objects) |
---
### 4. Get Object Type Details
```
GET /rest/assets/1.0/objecttype/:id
```
**Response Structure:** Same as individual item in the flat list above.
---
### 5. Get Object Type Attributes
```
GET /rest/assets/1.0/objecttype/:id/attributes
```
**Response Structure:**
```json
[
{
"id": 100,
"objectType": {
"id": 11,
"name": "Computer"
},
"name": "Name",
"label": true,
"type": 0,
"description": "Asset name/label",
"defaultType": {
"id": 0,
"name": "Text"
},
"typeValue": null,
"typeValueMulti": [],
"additionalValue": null,
"referenceType": null,
"referenceObjectTypeId": null,
"referenceObjectType": null,
"editable": true,
"system": true,
"sortable": true,
"summable": false,
"indexed": true,
"minimumCardinality": 1,
"maximumCardinality": 1,
"suffix": "",
"removable": false,
"hidden": false,
"includeChildObjectTypes": false,
"uniqueAttribute": false,
"regexValidation": null,
"iql": null,
"options": "",
"position": 0
},
{
"id": 101,
"objectType": {
"id": 11,
"name": "Computer"
},
"name": "Serial Number",
"label": false,
"type": 0,
"description": "Device serial number",
"defaultType": {
"id": 0,
"name": "Text"
},
"typeValue": null,
"typeValueMulti": [],
"additionalValue": null,
"referenceType": null,
"referenceObjectTypeId": null,
"referenceObjectType": null,
"editable": true,
"system": false,
"sortable": true,
"summable": false,
"indexed": true,
"minimumCardinality": 0,
"maximumCardinality": 1,
"suffix": "",
"removable": true,
"hidden": false,
"includeChildObjectTypes": false,
"uniqueAttribute": true,
"regexValidation": "^[A-Z0-9]{10,20}$",
"iql": null,
"options": "",
"position": 1
},
{
"id": 102,
"objectType": {
"id": 11,
"name": "Computer"
},
"name": "Assigned User",
"label": false,
"type": 2,
"description": "User assigned to this asset",
"defaultType": null,
"typeValue": "SHOW_ON_ASSET",
"typeValueMulti": [],
"additionalValue": null,
"referenceType": null,
"referenceObjectTypeId": null,
"referenceObjectType": null,
"editable": true,
"system": false,
"sortable": true,
"summable": false,
"indexed": true,
"minimumCardinality": 0,
"maximumCardinality": 1,
"suffix": "",
"removable": true,
"hidden": false,
"includeChildObjectTypes": false,
"uniqueAttribute": false,
"regexValidation": null,
"iql": null,
"options": "",
"position": 2
},
{
"id": 103,
"objectType": {
"id": 11,
"name": "Computer"
},
"name": "Location",
"label": false,
"type": 1,
"description": "Physical location of the asset",
"defaultType": null,
"typeValue": null,
"typeValueMulti": [],
"additionalValue": null,
"referenceType": {
"id": 1,
"name": "Reference",
"description": "Standard reference",
"color": "#0052CC",
"url16": null,
"removable": false,
"objectSchemaId": 1
},
"referenceObjectTypeId": 20,
"referenceObjectType": {
"id": 20,
"name": "Location",
"objectSchemaId": 1
},
"editable": true,
"system": false,
"sortable": true,
"summable": false,
"indexed": true,
"minimumCardinality": 0,
"maximumCardinality": 1,
"suffix": "",
"removable": true,
"hidden": false,
"includeChildObjectTypes": true,
"uniqueAttribute": false,
"regexValidation": null,
"iql": "objectType = Location",
"options": "",
"position": 3
},
{
"id": 104,
"objectType": {
"id": 11,
"name": "Computer"
},
"name": "Status",
"label": false,
"type": 7,
"description": "Current asset status",
"defaultType": null,
"typeValue": "1",
"typeValueMulti": ["1", "2", "3"],
"additionalValue": null,
"referenceType": null,
"referenceObjectTypeId": null,
"referenceObjectType": null,
"editable": true,
"system": false,
"sortable": true,
"summable": false,
"indexed": true,
"minimumCardinality": 1,
"maximumCardinality": 1,
"suffix": "",
"removable": true,
"hidden": false,
"includeChildObjectTypes": false,
"uniqueAttribute": false,
"regexValidation": null,
"iql": null,
"options": "",
"position": 4
}
]
```
**Attribute Fields to Store:**
| Field | Type | Description |
|-------|------|-------------|
| id | integer | Attribute ID |
| objectType | object | Parent object type {id, name} |
| name | string | Attribute name |
| label | boolean | Is this the label/display attribute |
| type | integer | Attribute type (see type reference below) |
| description | string | Optional description |
| defaultType | object/null | Default type info {id, name} for type=0 |
| typeValue | string/null | Type-specific configuration |
| typeValueMulti | array | Multiple type values (e.g., allowed status IDs) |
| additionalValue | string/null | Additional configuration |
| referenceType | object/null | Reference type details for type=1 |
| referenceObjectTypeId | integer/null | Target object type ID for references |
| referenceObjectType | object/null | Target object type details |
| editable | boolean | Can values be edited |
| system | boolean | Is system attribute (Name, Key, Created, Updated) |
| sortable | boolean | Can sort by this attribute |
| summable | boolean | Can sum values (numeric types) |
| indexed | boolean | Is indexed for search |
| minimumCardinality | integer | Minimum required values (0=optional, 1=required) |
| maximumCardinality | integer | Maximum values (-1=unlimited, 1=single) |
| suffix | string | Display suffix (e.g., "GB", "USD") |
| removable | boolean | Can attribute be deleted |
| hidden | boolean | Is hidden from default view |
| includeChildObjectTypes | boolean | Include child types in reference selection |
| uniqueAttribute | boolean | Must values be unique |
| regexValidation | string/null | Validation regex pattern |
| iql | string/null | IQL/AQL filter for reference selection |
| options | string | Additional options (CSV for Select type) |
| position | integer | Display order position |
---
## Attribute Type Reference
### Main Types (type field)
| Type | Name | Description | Uses defaultType |
|------|------|-------------|------------------|
| 0 | Default | Uses defaultType for specific type | Yes |
| 1 | Object | Reference to another Assets object | No |
| 2 | User | Jira user reference | No |
| 3 | Confluence | Confluence page reference | No |
| 4 | Group | Jira group reference | No |
| 5 | Version | Jira version reference | No |
| 6 | Project | Jira project reference | No |
| 7 | Status | Status type reference | No |
### Default Types (defaultType.id when type=0)
| ID | Name | Description |
|----|------|-------------|
| 0 | Text | Single-line text |
| 1 | Integer | Whole number |
| 2 | Boolean | True/False checkbox |
| 3 | Double | Decimal number |
| 4 | Date | Date only (no time) |
| 5 | Time | Time only (no date) |
| 6 | DateTime | Date and time |
| 7 | URL | Web link |
| 8 | Email | Email address |
| 9 | Textarea | Multi-line text |
| 10 | Select | Dropdown selection (options in `options` field) |
| 11 | IP Address | IP address format |
---
## Implementation Requirements
### 1. Sync Flow
Implement the following synchronization flow:
```
┌─────────────────────────────────────────────────────────────┐
│ Schema Sync Process │
├─────────────────────────────────────────────────────────────┤
│ │
│ 1. GET /objectschema/list │
│ └── Store/Update all schemas in local DB │
│ │
│ 2. For each schema: │
│ ├── GET /objectschema/:id │
│ │ └── Update schema details │
│ │ │
│ └── GET /objectschema/:id/objecttypes/flat │
│ └── Store/Update all object types │
│ │
│ 3. For each object type: │
│ ├── GET /objecttype/:id (optional, for latest details) │
│ │ │
│ └── GET /objecttype/:id/attributes │
│ └── Store/Update all attributes │
│ │
│ 4. Clean up orphaned records (deleted in Jira) │
│ │
└─────────────────────────────────────────────────────────────┘
```
### 2. Database Operations
For each entity type, implement:
- **Upsert logic**: Insert new records, update existing ones based on Jira ID
- **Soft delete or cleanup**: Handle items that exist locally but not in Jira anymore
- **Relationship mapping**: Maintain foreign key relationships (schema → object types → attributes)
### 3. Rate Limiting
Implement rate limiting to avoid overloading the Jira server:
- Add 100-200ms delay between API requests
- Implement exponential backoff on 429 (Too Many Requests) responses
- Maximum 3-5 concurrent requests if using parallel processing
```typescript
// Example rate limiting implementation
const delay = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
async function fetchWithRateLimit<T>(url: string): Promise<T> {
await delay(150); // 150ms between requests
const response = await fetch(url, { headers: getAuthHeaders() });
if (response.status === 429) {
const retryAfter = parseInt(response.headers.get('Retry-After') ?? '5', 10);
await delay(retryAfter * 1000);
return fetchWithRateLimit(url); // NOTE: retries indefinitely on repeated 429s — add a max-retry cap in production
}
return response.json();
}
```
### 4. Error Handling
Handle these scenarios:
- **401 Unauthorized**: Invalid credentials
- **403 Forbidden**: Insufficient permissions
- **404 Not Found**: Schema/Type deleted during sync
- **429 Too Many Requests**: Rate limited (implement backoff)
- **5xx Server Errors**: Retry with exponential backoff
### 5. Progress Tracking
Implement progress reporting:
- Total schemas to process
- Current schema being processed
- Total object types to process
- Current object type being processed
- Estimated time remaining (optional)
---
## Code Structure Suggestions
### Service/Repository Pattern
```
src/
├── services/
│ └── jira-assets/
│ ├── JiraAssetsApiClient.ts # HTTP client with auth & rate limiting
│ ├── SchemaSyncService.ts # Main sync orchestration
│ ├── ObjectTypeSyncService.ts # Object type sync logic
│ └── AttributeSyncService.ts # Attribute sync logic
├── repositories/
│ ├── SchemaRepository.ts # Schema DB operations
│ ├── ObjectTypeRepository.ts # Object type DB operations
│ └── AttributeRepository.ts # Attribute DB operations
└── models/
├── Schema.ts # Schema entity/model
├── ObjectType.ts # Object type entity/model
└── ObjectTypeAttribute.ts # Attribute entity/model
```
### Sync Service Interface
```typescript
interface SchemaSyncService {
/**
* Sync all schemas and their complete structure
* @returns Summary of sync operation
*/
syncAll(): Promise<SyncResult>;
/**
* Sync a single schema by ID
* @param schemaId - Jira schema ID
*/
syncSchema(schemaId: number): Promise<SyncResult>;
/**
* Get sync status/progress
*/
getProgress(): SyncProgress;
}
interface SyncResult {
success: boolean;
schemasProcessed: number;
objectTypesProcessed: number;
attributesProcessed: number;
errors: SyncError[];
duration: number; // milliseconds
}
interface SyncProgress {
status: 'idle' | 'running' | 'completed' | 'failed';
currentSchema?: string;
currentObjectType?: string;
schemasTotal: number;
schemasCompleted: number;
objectTypesTotal: number;
objectTypesCompleted: number;
startedAt?: Date;
estimatedCompletion?: Date;
}
```
---
## Validation Checklist
After implementation, verify:
- [ ] All schemas are fetched from `/objectschema/list`
- [ ] Schema details are updated from `/objectschema/:id`
- [ ] All object types are fetched for each schema from `/objectschema/:id/objecttypes/flat`
- [ ] Object type hierarchy (parentObjectTypeId) is preserved
- [ ] All attributes are fetched for each object type from `/objecttype/:id/attributes`
- [ ] Attribute types are correctly mapped (type + defaultType)
- [ ] Reference attributes store referenceObjectTypeId and referenceType
- [ ] Status attributes store typeValueMulti (allowed status IDs)
- [ ] Rate limiting prevents 429 errors
- [ ] Error handling covers all failure scenarios
- [ ] Sync can be resumed after failure
- [ ] Orphaned local records are handled (deleted in Jira)
- [ ] Foreign key relationships are maintained
- [ ] Timestamps (created, updated) are stored correctly
---
## Testing Scenarios
1. **Initial sync**: Empty local database, full sync from Jira
2. **Incremental sync**: Existing data, detect changes
3. **Schema added**: New schema created in Jira
4. **Schema deleted**: Schema removed from Jira
5. **Object type added**: New type in existing schema
6. **Object type moved**: Parent changed in hierarchy
7. **Attribute added/modified/removed**: Changes to type attributes
8. **Large schema**: Schema with 50+ object types, 500+ attributes
9. **Network failure**: Handle timeouts and retries
10. **Rate limiting**: Handle 429 responses gracefully
---
## Notes
- The `/objectschema/:id/objecttypes/flat` endpoint returns ALL object types in one call, which is more efficient than fetching hierarchically
- The `label` field on attributes indicates which attribute is used as the display name for objects
- System attributes (system=true) are: Name, Key, Created, Updated - these exist on all object types
- The `iql` field on reference attributes contains the filter query for selecting valid reference targets
- The `options` field on Select type attributes (type=0, defaultType.id=10) contains comma-separated options

View File

@@ -1,101 +0,0 @@
# Key Vault Access Request - For Administrators
## 📋 Request Information
**Requested by:** adm_bhausmans@zuyderland.nl
**Date:** _YYYY-MM-DD (fill in — the `$(date +%Y-%m-%d)` shell substitution does not expand in Markdown)_
**Purpose:** Grant App Services access to Key Vault for CMDB Insight deployment
## 🔐 Key Vault Details
- **Key Vault Name:** `zdl-cmdb-insight-prd-kv`
- **Resource Group:** `zdl-cmdb-insight-prd-euwe-rg`
- **Key Vault ID:** `/subscriptions/e9c3e35d-5eca-4bfb-aae5-2e2659d1b474/resourceGroups/zdl-cmdb-insight-prd-euwe-rg/providers/Microsoft.KeyVault/vaults/zdl-cmdb-insight-prd-kv`
## 🎯 Required Access
**Role:** `Key Vault Secrets User`
**Scope:** Key Vault resource
**Purpose:** Allow App Services to read secrets from Key Vault
## 📱 App Service Principal IDs
### Backend Web App
- **App Name:** `zdl-cmdb-insight-prd-backend-webapp`
- **Principal ID:** `6bd8373f-f734-4d21-84f2-776fd11b17ae`
### Frontend Web App
- **App Name:** `zdl-cmdb-insight-prd-frontend-webapp`
- **Principal ID:** *(Get with command below)*
## 🚀 Commands for Administrator
### Option 1: Use the Script (Recommended)
```bash
cd /path/to/cmdb-insight
./scripts/grant-keyvault-access-admin.sh
```
### Option 2: Manual Commands
```bash
# Get Key Vault Resource ID
KV_ID=$(az keyvault show \
--name zdl-cmdb-insight-prd-kv \
--query id -o tsv)
# Get Frontend Principal ID (if needed)
FRONTEND_PRINCIPAL_ID=$(az webapp identity show \
--name zdl-cmdb-insight-prd-frontend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--query principalId -o tsv)
# Grant access to Backend
az role assignment create \
--assignee "6bd8373f-f734-4d21-84f2-776fd11b17ae" \
--role "Key Vault Secrets User" \
--scope $KV_ID
# Grant access to Frontend (if needed)
az role assignment create \
--assignee $FRONTEND_PRINCIPAL_ID \
--role "Key Vault Secrets User" \
--scope $KV_ID
```
### Option 3: Via Azure Portal
1. Navigate to Key Vault: `zdl-cmdb-insight-prd-kv`
2. Go to **Access control (IAM)**
3. Click **Add****Add role assignment**
4. Select role: **Key Vault Secrets User**
5. Assign access to: **Managed identity**
6. Select members:
- Backend: `zdl-cmdb-insight-prd-backend-webapp`
- Frontend: `zdl-cmdb-insight-prd-frontend-webapp`
7. Click **Review + assign**
## ✅ Verification
After granting access, verify with:
```bash
# Check role assignments
az role assignment list \
--scope "/subscriptions/e9c3e35d-5eca-4bfb-aae5-2e2659d1b474/resourceGroups/zdl-cmdb-insight-prd-euwe-rg/providers/Microsoft.KeyVault/vaults/zdl-cmdb-insight-prd-kv" \
--query "[?principalId=='6bd8373f-f734-4d21-84f2-776fd11b17ae']" \
--output table
```
## 📝 Notes
- Key Vault uses **RBAC authorization** (not access policies)
- The role "Key Vault Secrets User" only allows reading secrets (not writing/deleting)
- This is the recommended approach for production deployments
- Access is granted via Managed Identity (no credentials stored)
## 🔗 Related Documentation
- `docs/AZURE-APP-SERVICE-DEPLOYMENT.md` - Complete deployment guide
- `scripts/grant-keyvault-access-admin.sh` - Automated script for admins

View File

@@ -47,22 +47,23 @@ docker-compose ps
docker-compose exec postgres pg_isready -U cmdb
```
### Stap 5: Maak Databases Aan (Optioneel)
### Stap 5: Maak Database Aan (Optioneel)
De applicatie maakt databases automatisch aan, maar je kunt ze ook handmatig aanmaken:
De applicatie maakt de database automatisch aan, maar je kunt het ook handmatig doen:
```bash
docker-compose exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_cache;"
docker-compose exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_classifications;"
docker-compose exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_insight;"
```
**Note:** The application uses a single database for all data. All tables (CMDB cache, classification history, and session state) are stored in the same database.
## Verificatie
Na reset, check of alles werkt:
```bash
# Connect to database
docker-compose exec postgres psql -U cmdb -d cmdb_cache
docker-compose exec postgres psql -U cmdb -d cmdb_insight
# Check tables (zou leeg moeten zijn)
\dt
@@ -120,8 +121,8 @@ docker volume ls | grep postgres
Als je een fout krijgt dat de database al bestaat:
```bash
# Drop en recreate
docker-compose exec postgres psql -U cmdb -c "DROP DATABASE IF EXISTS cmdb_cache;"
docker-compose exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_cache;"
docker-compose exec postgres psql -U cmdb -c "DROP DATABASE IF EXISTS cmdb_insight;"
docker-compose exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_insight;"
```
### Connection Issues
@@ -145,7 +146,7 @@ Zorg dat je `.env` bestand correct is:
DATABASE_TYPE=postgres
DATABASE_HOST=postgres
DATABASE_PORT=5432
DATABASE_NAME=cmdb_cache
DATABASE_NAME=cmdb_insight
DATABASE_USER=cmdb
DATABASE_PASSWORD=cmdb-dev
```
@@ -154,7 +155,7 @@ Of gebruik connection string:
```env
DATABASE_TYPE=postgres
DATABASE_URL=postgresql://cmdb:cmdb-dev@postgres:5432/cmdb_cache
DATABASE_URL=postgresql://cmdb:cmdb-dev@postgres:5432/cmdb_insight
```
## Snelle Commando's
@@ -168,16 +169,16 @@ sleep 5 && \
docker-compose exec postgres pg_isready -U cmdb
# Check database size (na sync)
docker-compose exec postgres psql -U cmdb -d cmdb_cache -c "
docker-compose exec postgres psql -U cmdb -d cmdb_insight -c "
SELECT
pg_size_pretty(pg_database_size('cmdb_cache')) as size;
pg_size_pretty(pg_database_size('cmdb_insight')) as size;
"
# List all tables
docker-compose exec postgres psql -U cmdb -d cmdb_cache -c "\dt"
docker-compose exec postgres psql -U cmdb -d cmdb_insight -c "\dt"
# Count objects
docker-compose exec postgres psql -U cmdb -d cmdb_cache -c "
docker-compose exec postgres psql -U cmdb -d cmdb_insight -c "
SELECT object_type_name, COUNT(*)
FROM objects
GROUP BY object_type_name;

View File

@@ -1,320 +0,0 @@
# Next Steps - ACR Created! 🎉
Je Azure Container Registry is aangemaakt! Volg deze stappen om Docker images automatisch te bouwen en te pushen.
## 📋 Checklist
- [ ] Stap 1: Verifieer ACR naam
- [ ] Stap 2: Update pipeline variabelen (als nodig)
- [ ] Stap 3: Service Connection aanmaken in Azure DevOps
- [ ] Stap 4: Pipeline aanmaken en runnen
- [ ] Stap 5: Verifieer images in ACR
---
## 🔍 Stap 1: Verifieer ACR Naam
**Vind je ACR naam:**
**Via Azure Portal:**
1. Ga naar je Container Registry
2. Klik op **"Overview"**
3. Noteer de **"Login server"** (bijv. `zuyderlandcmdbacr.azurecr.io`)
4. De naam vóór `.azurecr.io` is je ACR naam (bijv. `zuyderlandcmdbacr`)
**Via Azure CLI:**
```bash
az acr list --query "[].{Name:name, LoginServer:loginServer}" -o table
```
**Noteer je ACR naam!** (bijv. `zuyderlandcmdbacr`)
---
## 🔧 Stap 2: Update Pipeline Variabelen
**Check of `azure-pipelines.yml` de juiste ACR naam heeft:**
1. **Open** `azure-pipelines.yml`
2. **Controleer regel 17:**
```yaml
acrName: 'zuyderlandcmdbacr' # ← Pas aan naar jouw ACR naam
```
3. **Als je ACR naam anders is**, pas het aan:
```yaml
acrName: 'jouw-acr-naam-hier'
```
4. **Commit en push** (als je het hebt aangepast):
```bash
git add azure-pipelines.yml
git commit -m "Update ACR name in pipeline"
git push origin main
```
**✅ Als de naam al klopt, ga door naar Stap 3!**
---
## 🔗 Stap 3: Service Connection Aanmaken in Azure DevOps
Deze connection geeft Azure DevOps toegang tot je ACR.
### Stappen:
1. **Ga naar je Azure DevOps project**
- Open je project in Azure DevOps
2. **Ga naar Project Settings**
- Klik op **⚙️ Project Settings** (onderaan links in het menu)
3. **Open Service Connections**
- Scroll naar **"Pipelines"** sectie
- Klik op **"Service connections"**
4. **Maak nieuwe connection**
- Klik op **"New service connection"** (of **"Create service connection"**)
- Kies **"Docker Registry"**
- Klik **"Next"**
5. **Selecteer Azure Container Registry**
- Kies **"Azure Container Registry"**
- Klik **"Next"**
6. **Configureer connection**
- **Authentication type**: Kies **"Service Principal"** ⭐ (aanbevolen)
- Dit is de standaard en meest betrouwbare optie
- Azure DevOps maakt automatisch een Service Principal aan
- **Azure subscription**: Selecteer je Azure subscription
- **Azure container registry**: Selecteer je ACR uit de dropdown (bijv. `zdlas`)
- **Service connection name**: `zuyderland-cmdb-acr-connection`
- ⚠️ **Belangrijk**: Deze naam moet overeenkomen met `dockerRegistryServiceConnection` in `azure-pipelines.yml`!
- **Description**: Optioneel (bijv. "ACR for CMDB GUI production")
7. **Save**
- Klik **"Save"** (of **"Verify and save"**)
- Azure DevOps test automatisch de connection
- Azure DevOps maakt automatisch een Service Principal aan met de juiste permissions
**💡 Waarom Service Principal?**
- ✅ Werkt perfect met Azure DevOps Services (cloud)
- ✅ Eenvoudig - Azure DevOps doet alles automatisch
- ✅ Betrouwbaar - Meest ondersteunde optie
- ✅ Veilig - Credentials worden veilig beheerd
📚 Zie `docs/AZURE-SERVICE-CONNECTION-AUTH.md` voor details over alle authentication types.
**✅ Service connection is aangemaakt!**
**Troubleshooting:**
- **"Loading Registries..." blijft hangen?**
- ✅ Refresh de pagina (F5)
- ✅ Check of je de juiste subscription hebt geselecteerd
- ✅ Wacht 10-30 seconden (kan even duren)
- ✅ **Workaround**: Gebruik "Others" optie (zie hieronder)
- Als verificatie faalt, check of je toegang hebt tot de ACR in Azure Portal
**🔧 Workaround: Als dropdown niet werkt, gebruik "Others" optie:**
1. Kies **"Docker Registry"** → **"Others"** (in plaats van "Azure Container Registry")
2. Vul handmatig in:
- **Docker Registry**: `zdlas.azurecr.io`
- **Docker ID**: (haal op met `az acr credential show --name zdlas`)
- **Docker Password**: (haal op met `az acr credential show --name zdlas`)
3. **Service connection name**: `zuyderland-cmdb-acr-connection`
4. Save
**Haal credentials op:**
```bash
az login
az acr credential show --name zdlas
# Gebruik "username" en "passwords[0].value"
```
📚 Zie `docs/AZURE-SERVICE-CONNECTION-TROUBLESHOOTING.md` voor uitgebreide troubleshooting.
---
## 🎯 Stap 4: Pipeline Aanmaken en Run
### Stappen:
1. **Ga naar Pipelines**
- Klik op **"Pipelines"** (links in het menu)
2. **Create New Pipeline**
- Klik op **"New pipeline"** of **"Create Pipeline"**
3. **Selecteer Repository**
- Kies **"Azure Repos Git"** (of waar je code staat)
- Selecteer je repository: **"CMDB Insight"** (of jouw repo naam)
4. **Kies YAML File**
- Kies **"Existing Azure Pipelines YAML file"**
- Selecteer:
- **Branch**: `main` (of jouw default branch)
- **Path**: `/azure-pipelines.yml`
5. **Review Configuration**
- Azure DevOps toont de pipeline configuratie
- Controleer of alles klopt
6. **Run Pipeline**
- Klik **"Run"** om de pipeline te starten
- De pipeline start automatisch met het bouwen van de images
**✅ Pipeline is gestart!**
### Wat gebeurt er nu?
De pipeline zal:
1. ✅ Code uitchecken
2. ✅ Backend Docker image bouwen
3. ✅ Frontend Docker image bouwen
4. ✅ Images naar Azure Container Registry pushen
**Verwachte tijd:** ~5-10 minuten (afhankelijk van build tijd)
---
## ✅ Stap 5: Verifieer Images in ACR
### In Azure Portal:
1. **Ga naar je Container Registry**
- Open Azure Portal
- Ga naar je Container Registry (`zuyderlandcmdbacr`)
2. **Bekijk Repositories**
- Klik op **"Repositories"** (links in het menu)
- Je zou moeten zien:
- `cmdb-insight/backend`
- `cmdb-insight/frontend`
3. **Bekijk Tags**
- Klik op een repository (bijv. `cmdb-insight/backend`)
- Je zou tags moeten zien:
- `latest`
- `123` (of build ID nummer)
**✅ Images zijn succesvol gebouwd en gepusht!**
### Via Azure CLI:
```bash
# Lijst repositories
az acr repository list --name zuyderlandcmdbacr
# Lijst tags voor backend
az acr repository show-tags --name zuyderlandcmdbacr --repository cmdb-insight/backend --orderby time_desc
# Lijst tags voor frontend
az acr repository show-tags --name zuyderlandcmdbacr --repository cmdb-insight/frontend --orderby time_desc
```
### In Azure DevOps:
1. **Ga naar je Pipeline**
- Klik op **"Pipelines"**
- Klik op je pipeline run
2. **Bekijk Logs**
- Klik op een job (bijv. "Build Docker Images")
- Bekijk de logs per stap
- Bij success zie je:
```
Backend Image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:123
Frontend Image: zuyderlandcmdbacr.azurecr.io/cmdb-insight/frontend:123
```
---
## 🚨 Troubleshooting
### Pipeline Fails: "Service connection not found"
**Oplossing:**
- Controleer of de service connection naam in `azure-pipelines.yml` (regel 19) overeenkomt met de naam in Azure DevOps
- Ga naar Project Settings → Service connections en verifieer de naam
- Pas `dockerRegistryServiceConnection` aan in `azure-pipelines.yml` als nodig
### Pipeline Fails: "ACR not found"
**Oplossing:**
- Controleer of de `acrName` variabele correct is in `azure-pipelines.yml` (regel 17)
- Verifieer dat de ACR bestaat: `az acr list`
- Check of je de juiste subscription hebt geselecteerd in de service connection
### Pipeline Fails: "Permission denied"
**Oplossing:**
- Controleer of de service connection de juiste permissions heeft
- Verifieer dat je Azure subscription toegang heeft tot de ACR
- Check of de service connection is geverifieerd (groen vinkje in Azure DevOps)
- Probeer de service connection opnieuw aan te maken
### Images worden niet gepusht
**Oplossing:**
- Check de pipeline logs voor specifieke errors
- Verifieer dat de Docker build succesvol is
- Controleer of de ACR admin-enabled is (voor development)
- Check of de service connection correct is geconfigureerd
### Build Fails: "Dockerfile not found"
**Oplossing:**
- Verifieer dat `backend/Dockerfile.prod` en `frontend/Dockerfile.prod` bestaan
- Check of de paths correct zijn in `azure-pipelines.yml`
- Controleer of de files zijn gecommit en gepusht naar de repository
---
## 🎉 Success!
Als alles goed is gegaan, heb je nu:
- ✅ Azure Container Registry aangemaakt
- ✅ Service Connection geconfigureerd
- ✅ Pipeline aangemaakt en gerund
- ✅ Docker images gebouwd en gepusht naar ACR
**Je images zijn nu beschikbaar op:**
- Backend: `zuyderlandcmdbacr.azurecr.io/cmdb-insight/backend:latest`
- Frontend: `zuyderlandcmdbacr.azurecr.io/cmdb-insight/frontend:latest`
---
## 🚀 Volgende Stappen
Nu je images in ACR staan, kun je ze deployen naar:
1. **Azure Container Instances (ACI)** - Eenvoudig, snel
2. **Azure App Service (Container)** - Managed service
3. **Azure Kubernetes Service (AKS)** - Voor complexere setups
4. **VM met Docker Compose** - Volledige controle
Zie `docs/AZURE-DEPLOYMENT-SUMMARY.md` voor deployment opties.
---
## 📚 Meer Informatie
- **Quick Start Guide**: `docs/AZURE-ACR-QUICKSTART.md`
- **Azure DevOps Setup**: `docs/AZURE-DEVOPS-SETUP.md`
- **Container Registry Guide**: `docs/AZURE-CONTAINER-REGISTRY.md`
- **Deployment Options**: `docs/AZURE-DEPLOYMENT-SUMMARY.md`
---
## 💡 Tips
1. **Automatische Triggers**: De pipeline triggert automatisch bij elke push naar `main` branch
2. **Version Tags**: Gebruik git tags (bijv. `v1.0.0`) voor versie-specifieke builds
3. **Monitor Costs**: Check Azure Portal regelmatig voor storage gebruik
4. **Cleanup**: Overweeg oude images te verwijderen om kosten te besparen
---
**Veel succes! 🚀**

View File

@@ -1083,10 +1083,10 @@ const object: CMDBObject = {
- `routes/cache.ts`
- Easy to replace all at once
4. **Separate Database**
- Classifications database (`database.ts`) is separate
- Won't be affected by changes
- No risk to existing data
4. **Classifications Database**
- Classifications database (`database.ts`) uses the same database as the cache
- All data (CMDB cache, classification history, session state) in one database
- Won't be affected by cache changes regardless
5. **Rollback Plan**
- Old code in git history

View File

@@ -0,0 +1,214 @@
# PostgreSQL Version Upgrade Guide
Guide for upgrading Azure PostgreSQL Flexible Server from version 15 to 18.
## ⚠️ Important Considerations
### Before Upgrading
1. **Backup**: Azure automatically creates backups, but verify backups are working
2. **Downtime**: Major version upgrades require downtime (typically 5-15 minutes)
3. **Application Compatibility**: Ensure your application supports PostgreSQL 18
4. **Extension Compatibility**: Verify that all PostgreSQL extensions you use are compatible with version 18
### PostgreSQL 18 Changes
- **Performance improvements**: Better query optimization
- **New features**: Enhanced JSON support, improved partitioning
- **Breaking changes**: Some deprecated features removed
- **Extension updates**: Some extensions may need updates
---
## 🚀 Upgrade Process
### Step 1: Check Current Version
```bash
az postgres flexible-server show \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--query "{name:name, version:version, state:state}" -o table
```
### Step 2: Verify Backups
```bash
# List recent backups
az postgres flexible-server backup list \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--server-name zdl-cmdb-insight-prd-psql \
--query "[0:5].{name:name, timeCreated:timeCreated}" -o table
```
### Step 3: Schedule Maintenance Window
**Recommended**: Perform upgrade during low-traffic period or maintenance window.
### Step 4: Perform Upgrade
```bash
# Upgrade to PostgreSQL 18
az postgres flexible-server upgrade \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--version 18 \
--yes
```
**What happens:**
- Server will restart
- Database will be upgraded to version 18
- Downtime: ~5-15 minutes
- Existing data is preserved
### Step 5: Verify Upgrade
```bash
# Check new version
az postgres flexible-server show \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--query "{name:name, version:version, state:state}" -o table
# Should show: version: "18"
```
### Step 6: Test Application
```bash
# Restart backend app to reconnect
az webapp restart \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg
# Check logs for connection
az webapp log tail \
--name zdl-cmdb-insight-prd-backend-webapp \
--resource-group zdl-cmdb-insight-prd-euwe-rg
```
---
## 🔄 Alternative: Step-by-Step Upgrade (15 → 16 → 17 → 18)
If you prefer to upgrade incrementally:
```bash
# Upgrade 15 → 16
az postgres flexible-server upgrade \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--version 16 \
--yes
# Wait for completion, then upgrade 16 → 17
az postgres flexible-server upgrade \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--version 17 \
--yes
# Wait for completion, then upgrade 17 → 18
az postgres flexible-server upgrade \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--version 18 \
--yes
```
**Note**: Direct upgrade from 15 → 18 is supported and recommended (faster, less downtime).
---
## 🛠️ Troubleshooting
### Upgrade Fails
If upgrade fails:
1. **Check server state:**
```bash
az postgres flexible-server show \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--query state -o tsv
```
2. **Check logs:**
```bash
az monitor activity-log list \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--query "[?contains(operationName.value, 'upgrade')]" \
--output table
```
3. **Restore from backup if needed:**
```bash
# List available backups
az postgres flexible-server backup list \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--server-name zdl-cmdb-insight-prd-psql
```
### Application Connection Issues
If application can't connect after upgrade:
1. **Verify connection string** (should still work, no changes needed)
2. **Check firewall rules** (should remain unchanged)
3. **Restart application** to refresh connections
4. **Check application logs** for specific errors
---
## 📊 Monitoring
### Check Upgrade Status
```bash
# Monitor upgrade progress
az postgres flexible-server show \
--resource-group zdl-cmdb-insight-prd-euwe-rg \
--name zdl-cmdb-insight-prd-psql \
--query "{state:state, version:version}" -o json
```
### Performance After Upgrade
Monitor performance metrics after upgrade:
- Query performance
- Connection times
- Resource usage
---
## ✅ Post-Upgrade Checklist
- [ ] Version upgraded successfully (verified)
- [ ] Application reconnected successfully
- [ ] All databases accessible
- [ ] Performance metrics normal
- [ ] No errors in application logs
- [ ] Backup system working
---
## 🔗 Related Documentation
- **`docs/AZURE-POSTGRESQL-SETUP.md`** - Initial PostgreSQL setup
- **`docs/AZURE-APP-SERVICE-DEPLOYMENT.md`** - App Service deployment
---
## 💡 Best Practices
1. **Test in staging first** (if you have a staging environment)
2. **Schedule during maintenance window**
3. **Notify users** about planned downtime
4. **Monitor closely** after upgrade
5. **Keep backups** for at least 7 days after upgrade
---
**Note**: PostgreSQL 18 is the latest version with improved performance and features. The upgrade is generally safe and recommended.

View File

@@ -1,323 +0,0 @@
# Quick Deployment Guide - Van Images naar Productie 🚀
Je Docker images zijn klaar! Hier is een snelle guide om ze te deployen.
## 🎯 Snelle Keuze: Welke Deployment Optie?
**Voor 20 gebruikers, productie:**
**Azure App Service** (Managed, eenvoudig, ~€20/maand)
**Voor test/development:**
**VM met Docker Compose** (Flexibel, snel op te zetten)
**Voor enterprise/scale:**
**Azure Kubernetes Service** (Complex, maar krachtig)
---
## ⚡ Optie 1: Azure App Service (Aanbevolen) - 15 minuten
### Stap 1: Maak App Service Plan aan
```bash
# Resource Group (als nog niet bestaat)
az group create --name rg-cmdb-gui --location westeurope
# App Service Plan (Basic B1 - voldoende voor 20 gebruikers)
az appservice plan create \
--name plan-cmdb-gui \
--resource-group rg-cmdb-gui \
--sku B1 \
--is-linux
```
### Stap 2: Maak Web Apps aan
```bash
# Backend Web App
az webapp create \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui \
--plan plan-cmdb-gui \
--deployment-container-image-name zdlas.azurecr.io/cmdb-insight/backend:latest
# Frontend Web App
az webapp create \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui \
--plan plan-cmdb-gui \
--deployment-container-image-name zdlas.azurecr.io/cmdb-insight/frontend:latest
```
### Stap 3: Configureer ACR Authentication
```bash
# Enable managed identity
az webapp identity assign \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui
az webapp identity assign \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui
# Grant ACR pull permissions
az acr update \
--name zdlas \
--admin-enabled true
# Configure ACR credentials
az webapp config container set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui \
--docker-custom-image-name zdlas.azurecr.io/cmdb-insight/backend:latest \
--docker-registry-server-url https://zdlas.azurecr.io \
--docker-registry-server-user $(az acr credential show --name zdlas --query username -o tsv) \
--docker-registry-server-password $(az acr credential show --name zdlas --query passwords[0].value -o tsv)
az webapp config container set \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui \
--docker-custom-image-name zdlas.azurecr.io/cmdb-insight/frontend:latest \
--docker-registry-server-url https://zdlas.azurecr.io \
--docker-registry-server-user $(az acr credential show --name zdlas --query username -o tsv) \
--docker-registry-server-password $(az acr credential show --name zdlas --query passwords[0].value -o tsv)
```
### Stap 4: Configureer Environment Variabelen
```bash
# Backend environment variables
az webapp config appsettings set \
--name cmdb-backend-prod \
--resource-group rg-cmdb-gui \
--settings \
NODE_ENV=production \
PORT=3001 \
JIRA_BASE_URL=https://jira.zuyderland.nl \
JIRA_SCHEMA_ID=your-schema-id \
SESSION_SECRET=your-secure-secret \
FRONTEND_URL=https://cmdb-frontend-prod.azurewebsites.net
# Frontend environment variables
az webapp config appsettings set \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui \
--settings \
VITE_API_URL=https://cmdb-backend-prod.azurewebsites.net/api
```
### Stap 5: Configureer SSL (Gratis via App Service)
```bash
# App Service heeft automatisch SSL via *.azurewebsites.net
# Voor custom domain:
az webapp config hostname add \
--webapp-name cmdb-frontend-prod \
--resource-group rg-cmdb-gui \
--hostname cmdb.zuyderland.nl
# Bind SSL certificate
az webapp config ssl bind \
--name cmdb-frontend-prod \
--resource-group rg-cmdb-gui \
--certificate-thumbprint <thumbprint> \
--ssl-type SNI
```
### Stap 6: Start de Apps
```bash
az webapp start --name cmdb-backend-prod --resource-group rg-cmdb-gui
az webapp start --name cmdb-frontend-prod --resource-group rg-cmdb-gui
```
**Je applicatie is nu live op:**
- Frontend: `https://cmdb-frontend-prod.azurewebsites.net`
- Backend API: `https://cmdb-backend-prod.azurewebsites.net/api`
---
## 🖥️ Optie 2: VM met Docker Compose - 20 minuten
### Stap 1: Maak VM aan
```bash
az vm create \
--resource-group rg-cmdb-gui \
--name vm-cmdb-gui \
--image Ubuntu2204 \
--size Standard_B2s \
--admin-username azureuser \
--generate-ssh-keys \
--public-ip-sku Standard
```
### Stap 2: SSH naar VM en installeer Docker
```bash
# SSH naar VM
ssh azureuser@<vm-public-ip>
# Installeer Docker
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
sudo usermod -aG docker $USER
newgrp docker
# Installeer Docker Compose
sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
```
### Stap 3: Login naar ACR
```bash
# Installeer Azure CLI (als nog niet geïnstalleerd)
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az login
# Login naar ACR
az acr login --name zdlas
# OF gebruik credentials
az acr credential show --name zdlas
docker login zdlas.azurecr.io -u <username> -p <password>
```
### Stap 4: Clone Repository en Deploy
```bash
# Clone repository
git clone <your-repo-url>
cd cmdb-insight
# Maak .env.production aan
nano .env.production
# (Plak je environment variabelen)
# Update docker-compose.prod.acr.yml (ACR naam is al correct: zdlas)
# Start containers
docker-compose -f docker-compose.prod.acr.yml up -d
# Check status
docker-compose -f docker-compose.prod.acr.yml ps
docker-compose -f docker-compose.prod.acr.yml logs -f
```
### Stap 5: Configureer Nginx en SSL
```bash
# Installeer certbot voor Let's Encrypt
sudo apt update
sudo apt install certbot python3-certbot-nginx -y
# Configureer SSL
sudo certbot --nginx -d cmdb.zuyderland.nl
```
---
## 🔍 Verificatie
**Test of alles werkt:**
```bash
# Check backend health
curl https://your-backend-url/api/health
# Check frontend
curl https://your-frontend-url
# Check container logs
docker-compose -f docker-compose.prod.acr.yml logs backend
docker-compose -f docker-compose.prod.acr.yml logs frontend
```
---
## 🔄 Updates Deployen
**Wanneer je code pusht naar `main`:**
1. Pipeline bouwt automatisch nieuwe images
2. Images worden gepusht naar ACR met nieuwe tag
3. Pull nieuwe images:
```bash
# VM met Docker Compose
docker-compose -f docker-compose.prod.acr.yml pull
docker-compose -f docker-compose.prod.acr.yml up -d
# App Service (automatisch via Continuous Deployment)
az webapp restart --name cmdb-backend-prod --resource-group rg-cmdb-gui
```
---
## 📝 Environment Variabelen Template
**Maak `.env.production` aan:**
```bash
# Backend
NODE_ENV=production
PORT=3001
# Jira
JIRA_BASE_URL=https://jira.zuyderland.nl
JIRA_SCHEMA_ID=your-schema-id
JIRA_PAT=your-pat-token
# OF OAuth
JIRA_OAUTH_CLIENT_ID=your-client-id
JIRA_OAUTH_CLIENT_SECRET=your-client-secret
JIRA_OAUTH_CALLBACK_URL=https://your-domain/api/auth/callback
# Session
SESSION_SECRET=$(openssl rand -hex 32)
# AI (Optioneel)
ANTHROPIC_API_KEY=your-key
OPENAI_API_KEY=your-key
# Database (als PostgreSQL)
DATABASE_URL=postgresql://user:pass@host:5432/db
# Frontend
VITE_API_URL=https://your-backend-url/api
```
**⚠️ BELANGRIJK:** Gebruik Azure Key Vault voor secrets in productie!
---
## 🎯 Volgende Stappen
1. **Kies deployment optie** (App Service of VM)
2. **Configureer environment variabelen**
3. **Deploy en test**
4. **Configureer SSL/TLS**
5. **Setup monitoring**
6. **Documenteer voor team**
---
## 📚 Meer Informatie
- **Volledige Deployment Guide**: `docs/DEPLOYMENT-NEXT-STEPS.md`
- **Production Deployment**: `docs/PRODUCTION-DEPLOYMENT.md`
- **Azure Deployment Summary**: `docs/AZURE-DEPLOYMENT-SUMMARY.md`
---
## ✅ Success Checklist
- [ ] Images geverifieerd in ACR
- [ ] Deployment optie gekozen
- [ ] Environment variabelen geconfigureerd
- [ ] Applicatie gedeployed
- [ ] SSL/TLS geconfigureerd
- [ ] Health checks werken
- [ ] Monitoring ingesteld
- [ ] Team geïnformeerd
**Veel succes met de deployment! 🚀**

View File

@@ -1,283 +0,0 @@
# Refactor Phase 2B + 3: Implementation Status
**Date:** 2025-01-XX
**Status:** ✅ Phase 2B Complete - New Architecture Implemented
**Next:** Phase 3 - Migration & Cleanup
## Summary
New refactored architecture has been fully implemented and wired behind feature flag `USE_V2_API=true`. All new services, repositories, and API controllers are in place.
---
## ✅ Completed Components
### Infrastructure Layer (`/infrastructure`)
1. **`infrastructure/jira/JiraAssetsClient.ts`** ✅
- Pure HTTP API client (no business logic)
- Methods: `getObject()`, `searchObjects()`, `updateObject()`, `getSchemas()`, `getObjectTypes()`, `getAttributes()`
- Token management (service account for reads, user PAT for writes)
- Returns `ObjectEntry` from domain types
### Domain Layer (`/domain`)
1. **`domain/jiraAssetsPayload.ts`** ✅ (Phase 2A)
- Complete API payload contract
- Type guards: `isReferenceValue()`, `isSimpleValue()`, `hasAttributes()`
2. **`domain/syncPolicy.ts`** ✅
- `SyncPolicy` enum (ENABLED, REFERENCE_ONLY, SKIP)
- Policy resolution logic
### Repository Layer (`/repositories`)
1. **`repositories/SchemaRepository.ts`** ✅
- Schema CRUD: `upsertSchema()`, `getAllSchemas()`
- Object type CRUD: `upsertObjectType()`, `getEnabledObjectTypes()`, `getObjectTypeByJiraId()`
- Attribute CRUD: `upsertAttribute()`, `getAttributesForType()`, `getAttributeByFieldName()`
2. **`repositories/ObjectCacheRepository.ts`** ✅
- Object CRUD: `upsertObject()`, `getObject()`, `getObjectByKey()`, `deleteObject()`
- Attribute value CRUD: `upsertAttributeValue()`, `batchUpsertAttributeValues()`, `getAttributeValues()`, `deleteAttributeValues()`
- Relations: `upsertRelation()`, `deleteRelations()`
- Queries: `getObjectsByType()`, `countObjectsByType()`
### Service Layer (`/services`)
1. **`services/PayloadProcessor.ts`** ✅
- **Recursive reference processing** with visited-set cycle detection
- Processes `ObjectEntry` and `ReferencedObject` recursively (level2, level3, etc.)
- **CRITICAL**: Only replaces attributes if `attributes[]` array is present
- Extracts relations from references
- Normalizes to EAV format
2. **`services/SchemaSyncService.ts`** ✅
- Syncs schemas from Jira API: `syncAllSchemas()`
- Discovers and stores object types and attributes
- Returns enabled types for sync orchestration
3. **`services/ObjectSyncService.ts`** ✅
- Full sync: `syncObjectType()` - syncs all objects of an enabled type
- Incremental sync: `syncIncremental()` - syncs objects updated since timestamp
- Single object sync: `syncSingleObject()` - for refresh operations
- Recursive processing via `PayloadProcessor`
- Respects `SyncPolicy` (ENABLED vs REFERENCE_ONLY)
4. **`services/QueryService.ts`** ✅
- Universal query builder (DB → TypeScript)
- `getObject()` - reconstruct single object
- `getObjects()` - list objects of type
- `countObjects()` - count by type
- `searchByLabel()` - search by label
5. **`services/RefreshService.ts`** ✅
- Force-refresh-on-read with deduplication
- Locking mechanism prevents duplicate refresh operations
- Timeout protection (30s)
6. **`services/WriteThroughService.ts`** ✅
- Write-through updates: Jira API → DB cache
- Builds Jira update payload from field updates
- Uses same normalization logic as sync
7. **`services/ServiceFactory.ts`** ✅
- Singleton factory for all services
- Initializes all dependencies
- Single entry point: `getServices()`
### API Layer (`/api`)
1. **`api/controllers/ObjectsController.ts`** ✅
- `GET /api/v2/objects/:type` - List objects
- `GET /api/v2/objects/:type/:id?refresh=true` - Get object (with force refresh)
- `PUT /api/v2/objects/:type/:id` - Update object
2. **`api/controllers/SyncController.ts`** ✅
- `POST /api/v2/sync/schemas` - Sync all schemas
- `POST /api/v2/sync/objects` - Sync all enabled types
- `POST /api/v2/sync/objects/:typeName` - Sync single type
3. **`api/routes/v2.ts`** ✅
- V2 routes mounted at `/api/v2`
- Feature flag: `USE_V2_API=true` enables routes
- All routes require authentication
### Integration (`/backend/src/index.ts`)
✅ V2 routes wired with feature flag
✅ Token management for new `JiraAssetsClient`
✅ Backward compatible with old services
---
## 🔧 Key Features Implemented
### 1. Recursive Reference Processing ✅
- **Cycle detection**: Visited set using `objectId:objectKey` keys
- **Recursive expansion**: Processes `referencedObject.attributes[]` (level2, level3, etc.)
- **Preserves shallow objects**: Doesn't wipe attributes if `attributes[]` absent
### 2. Sync Policy Enforcement ✅
- **ENABLED**: Full sync with all attributes
- **REFERENCE_ONLY**: Cache minimal metadata for references
- **SKIP**: Unknown types skipped
### 3. Attribute Replacement Logic ✅
**CRITICAL RULE**: Only replaces attributes if `attributes[]` array is present in API response.
```typescript
if (shouldCacheAttributes) {
// attributes[] present - full replace
await deleteAttributeValues(objectId);
await batchUpsertAttributeValues(...);
}
// If attributes[] absent - keep existing attributes
```
### 4. Write-Through Updates ✅
1. Build Jira update payload
2. Send to Jira Assets API
3. Fetch fresh data
4. Update DB cache using same normalization
### 5. Force Refresh with Deduping ✅
- Lock mechanism prevents duplicate refreshes
- Timeout protection (30s)
- Concurrent reads allowed
---
## 📁 File Structure
```
backend/src/
├── domain/
│ ├── jiraAssetsPayload.ts ✅ Phase 2A
│ └── syncPolicy.ts ✅ New
├── infrastructure/
│ └── jira/
│ └── JiraAssetsClient.ts ✅ New (pure API)
├── repositories/
│ ├── SchemaRepository.ts ✅ New
│ └── ObjectCacheRepository.ts ✅ New
├── services/
│ ├── PayloadProcessor.ts ✅ New (recursive)
│ ├── SchemaSyncService.ts ✅ New
│ ├── ObjectSyncService.ts ✅ New
│ ├── QueryService.ts ✅ New
│ ├── RefreshService.ts ✅ New
│ ├── WriteThroughService.ts ✅ New
│ └── ServiceFactory.ts ✅ New
└── api/
├── controllers/
│ ├── ObjectsController.ts ✅ New
│ └── SyncController.ts ✅ New
└── routes/
└── v2.ts ✅ New
```
---
## 🚀 Usage (Feature Flag)
### Enable V2 API
```bash
# .env
USE_V2_API=true
```
### New Endpoints
```
GET /api/v2/objects/:type # List objects
GET /api/v2/objects/:type/:id?refresh=true # Get object (with refresh)
PUT /api/v2/objects/:type/:id # Update object
POST /api/v2/sync/schemas # Sync all schemas
POST /api/v2/sync/objects # Sync all enabled types
POST /api/v2/sync/objects/:typeName # Sync single type
```
---
## ✅ API Payload Contract Compliance
All services correctly handle:
- ✅ `objectEntries[]` → `ObjectEntry[]`
- ✅ `ObjectEntry.attributes[]` → `ObjectAttribute[]` (optional)
- ✅ `ObjectAttribute.objectAttributeValues[]` → `ObjectAttributeValue` union
- ✅ `ReferencedObject.attributes[]` → Recursive (level2+)
- ✅ Cycle detection with visited sets
- ✅ **CRITICAL**: Don't wipe attributes if `attributes[]` absent on shallow objects
---
## 🧪 Testing Status
**Compilation**: ✅ New code compiles without errors (pre-existing TypeScript config issues are unrelated)
**Ready for Testing**:
1. Enable `USE_V2_API=true`
2. Test new endpoints
3. Verify recursive reference processing
4. Verify attribute replacement logic
5. Verify write-through updates
---
## 📋 Next Steps (Phase 3)
### Step 1: Test V2 API ✅ (Ready)
- [ ] Enable feature flag
- [ ] Test schema sync endpoint
- [ ] Test object sync endpoint
- [ ] Test object read endpoint
- [ ] Test object write endpoint
- [ ] Verify recursive references processed
- [ ] Verify attribute replacement logic
### Step 2: Migrate Existing Endpoints
After V2 API is validated:
- [ ] Update `routes/objects.ts` to use new services
- [ ] Update `routes/cache.ts` to use new services
- [ ] Update `routes/schema.ts` to use new services
### Step 3: Delete Old Code
After migration complete:
- [ ] Delete `services/jiraAssets.ts` (merge remaining business logic first)
- [ ] Delete `services/jiraAssetsClient.ts` (replaced by infrastructure client)
- [ ] Delete `services/cacheStore.old.ts`
- [ ] Delete `services/normalizedCacheStore.ts` (replace with repositories)
- [ ] Delete `services/queryBuilder.ts` (functionality in QueryService)
- [ ] Delete `services/schemaDiscoveryService.ts` (replaced by SchemaSyncService)
- [ ] Delete `services/schemaCacheService.ts` (merged into SchemaRepository)
- [ ] Delete `services/schemaConfigurationService.ts` (functionality moved to SchemaRepository)
- [ ] Delete `services/schemaMappingService.ts` (deprecated)
- [ ] Delete `services/syncEngine.ts` (replaced by ObjectSyncService)
- [ ] Delete `services/cmdbService.ts` (functionality split into QueryService + WriteThroughService + RefreshService)
---
## ⚠️ Important Notes
1. **No Functional Changes Yet**: Old code still runs in parallel
2. **Feature Flag Required**: V2 API only active when `USE_V2_API=true`
3. **Token Management**: New client receives tokens from middleware (same as old)
4. **Database Schema**: Uses existing normalized EAV schema (no migration needed)
---
**End of Phase 2B + 3 Implementation Status**

View File

@@ -55,7 +55,7 @@ az acr create \
**Noteer je ACR naam!** Je hebt deze nodig in de volgende stappen.
**📚 Zie `docs/AZURE-ACR-QUICKSTART.md` voor een complete quick-start guide.**
**📚 Zie `docs/AZURE-ACR-SETUP.md` voor een complete quick-start guide.**
---

View File

@@ -773,7 +773,7 @@ For production with zero downtime, use deployment slots:
- Deploys to staging first
- Swaps to production after verification
**See `docs/AZURE-PIPELINE-DEPLOYMENT.md` for complete setup guide.**
**See `docs/AZURE-PIPELINES.md` for complete setup guide.**
---
@@ -939,5 +939,5 @@ az group delete --name $RESOURCE_GROUP --yes --no-wait
For questions or issues, refer to:
- `AZURE-APP-SERVICE-DEPLOYMENT.md` - Detailed App Service deployment guide
- `AZURE-CONTAINER-REGISTRY.md` - ACR setup and usage
- `AZURE-ACR-SETUP.md` - ACR setup and usage
- `PRODUCTION-DEPLOYMENT.md` - General production deployment guide

View File

@@ -21,7 +21,7 @@ docker-compose -f docker-compose.dev.yml logs -f postgres
```env
DATABASE_TYPE=postgres
DATABASE_URL=postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_cache
DATABASE_URL=postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_insight
```
Of individuele variabelen:
@@ -30,7 +30,7 @@ Of individuele variabelen:
DATABASE_TYPE=postgres
DATABASE_HOST=localhost
DATABASE_PORT=5432
DATABASE_NAME=cmdb_cache
DATABASE_NAME=cmdb_insight
DATABASE_USER=cmdb
DATABASE_PASSWORD=cmdb-dev
```
@@ -55,10 +55,10 @@ docker-compose -f docker-compose.dev.yml up -d
```bash
# Via psql
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_cache
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_insight
# Of direct
psql postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_cache
psql postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_insight
```
### Useful Commands
@@ -68,15 +68,15 @@ psql postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_cache
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -c "\l"
# List tables
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_cache -c "\dt"
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_insight -c "\dt"
# Check database size
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_cache -c "
SELECT pg_size_pretty(pg_database_size('cmdb_cache')) as size;
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_insight -c "
SELECT pg_size_pretty(pg_database_size('cmdb_insight')) as size;
"
# Count objects
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_cache -c "
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_insight -c "
SELECT object_type_name, COUNT(*)
FROM objects
GROUP BY object_type_name;
@@ -127,7 +127,7 @@ Maak een `.env` bestand in de root:
```env
# Database (voor backend)
DATABASE_TYPE=postgres
DATABASE_URL=postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_cache
DATABASE_URL=postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_insight
# Jira (optioneel)
JIRA_HOST=https://jira.zuyderland.nl
@@ -138,6 +138,73 @@ JIRA_SCHEMA_ID=your_schema_id
ANTHROPIC_API_KEY=your_key
```
## Database Access
### Quick Access
**Using the script:**
```bash
# Connect using psql
./scripts/open-database.sh psql
# Or via Docker
./scripts/open-database.sh docker
# Or get connection string for GUI tools
./scripts/open-database.sh url
```
**Direct psql command:**
```bash
psql postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_insight
```
**Via Docker:**
```bash
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -d cmdb_insight
```
### GUI Tools
**pgAdmin** (Free, Web-based):
- Download: https://www.pgadmin.org/download/
- Connection: `postgresql://cmdb:cmdb-dev@localhost:5432/cmdb_insight`
**DBeaver** (Free, Cross-platform):
- Download: https://dbeaver.io/download/
- Create new PostgreSQL connection with connection string above
**TablePlus** (macOS, Paid but has free tier):
- Download: https://tableplus.com/
- Create new PostgreSQL connection
### Useful SQL Commands
```sql
-- List all tables
\dt
-- Describe a table structure
\d objects
\d attribute_values
\d classification_history
-- View object counts
SELECT object_type_name, COUNT(*)
FROM objects
GROUP BY object_type_name;
-- View classification history
SELECT * FROM classification_history
ORDER BY timestamp DESC
LIMIT 10;
-- Check database size
SELECT pg_size_pretty(pg_database_size('cmdb_insight')) as size;
```
---
## Troubleshooting
### Port Already in Use
@@ -173,5 +240,5 @@ docker-compose -f docker-compose.dev.yml exec postgres pg_isready -U cmdb
De database wordt automatisch aangemaakt bij eerste start van de backend. Of maak handmatig:
```bash
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_cache;"
docker-compose -f docker-compose.dev.yml exec postgres psql -U cmdb -c "CREATE DATABASE cmdb_insight;"
```

View File

@@ -1,801 +0,0 @@
# Refactor Plan - Phase 1: Architecture Analysis
**Created:** 2025-01-XX
**Status:** Phase 1 - Analysis Only (No functional changes)
## Executive Summary
This document provides a comprehensive analysis of the current architecture and a plan for refactoring the CMDB Insight codebase to improve maintainability, reduce duplication, and establish clearer separation of concerns.
**Scope:** This is Phase 1 - analysis and planning only. No code changes will be made in this phase.
---
## Table of Contents
1. [Current Architecture Map](#current-architecture-map)
2. [Pain Points & Duplication](#pain-points--duplication)
3. [Target Architecture](#target-architecture)
4. [Migration Steps](#migration-steps)
5. [Explicit Deletion List](#explicit-deletion-list)
6. [API Payload Contract & Recursion Insights](#api-payload-contract--recursion-insights)
---
## Current Architecture Map
### File/Folder Structure
```
backend/src/
├── services/
│ ├── jiraAssets.ts # High-level Jira Assets service (business logic, ~3454 lines)
│ ├── jiraAssetsClient.ts # Low-level Jira Assets API client (~646 lines)
│ ├── schemaDiscoveryService.ts # Discovers schema from Jira API (~520 lines)
│ ├── schemaCacheService.ts # Caches schema metadata
│ ├── schemaConfigurationService.ts # Manages enabled object types
│ ├── schemaMappingService.ts # Maps object types to schema IDs
│ ├── syncEngine.ts # Background sync service (full/incremental) (~630 lines)
│ ├── normalizedCacheStore.ts # EAV pattern DB store (~1695 lines)
│ ├── cmdbService.ts # Universal schema-driven CMDB service (~531 lines)
│ ├── queryBuilder.ts # Dynamic SQL query builder (~278 lines)
│ ├── cacheStore.old.ts # Legacy cache store (deprecated)
│ └── database/
│ ├── normalized-schema.ts # DB schema definitions (Postgres/SQLite)
│ ├── factory.ts # Database adapter factory
│ ├── interface.ts # Database adapter interface
│ ├── postgresAdapter.ts # PostgreSQL adapter
│ ├── sqliteAdapter.ts # SQLite adapter
│ ├── migrate-to-normalized-schema.ts
│ └── fix-object-types-constraints.ts
├── routes/
│ ├── applications.ts # Application-specific endpoints (~780 lines)
│ ├── objects.ts # Generic object endpoints (~185 lines)
│ ├── cache.ts # Cache/sync endpoints (~165 lines)
│ ├── schema.ts # Schema endpoints (~107 lines)
│ └── schemaConfiguration.ts # Schema configuration endpoints
├── generated/
│ ├── jira-types.ts # Generated TypeScript types (~934 lines)
│ └── jira-schema.ts # Generated schema metadata (~895 lines)
└── scripts/
├── discover-schema.ts # Schema discovery CLI
├── generate-types-from-db.ts # Type generation from DB (~485 lines)
└── generate-schema.ts # Legacy schema generation
```
### Module Responsibilities
#### 1. Jira Assets API Client Calls
**Primary Files:**
- `services/jiraAssetsClient.ts` - Low-level HTTP client
- Methods: `getObject()`, `searchObjects()`, `getAllObjectsOfType()`, `updateObject()`, `parseObject()`
- Handles authentication (service account token for reads, user PAT for writes)
- API detection (Data Center vs Cloud)
- Object parsing from Jira format to CMDB format
- `services/jiraAssets.ts` - High-level business logic wrapper
- Application-specific methods (e.g., `getApplications()`, `updateApplication()`)
- Dashboard data aggregation
- Reference data caching
- Team dashboard calculations
- Legacy API methods
**Dependencies:**
- Uses `schemaCacheService` for type lookups
- Uses `schemaMappingService` for schema ID resolution
#### 2. Schema Discovery/Sync
**Primary Files:**
- `services/schemaDiscoveryService.ts`
- Discovers object types from Jira API (`/objectschema/{id}/objecttypes/flat`)
- Discovers attributes for each object type (`/objecttype/{id}/attributes`)
- Stores schema in database (`object_types`, `attributes` tables)
- Provides lookup methods: `getAttribute()`, `getAttributesForType()`, `getObjectType()`
- `services/schemaCacheService.ts`
- Caches schema from database
- Provides runtime schema access
- `services/schemaConfigurationService.ts`
- Manages enabled/disabled object types
- Schema-to-object-type mapping
- Configuration validation
- `services/schemaMappingService.ts`
- Maps object type names to schema IDs
- Legacy compatibility
**Scripts:**
- `scripts/discover-schema.ts` - CLI tool to trigger schema discovery
- `scripts/generate-types-from-db.ts` - Generates TypeScript types from database
#### 3. Object Sync/Import
**Primary Files:**
- `services/syncEngine.ts`
- `fullSync()` - Syncs all enabled object types
- `incrementalSync()` - Periodic sync of updated objects
- `syncType()` - Sync single object type
- `syncObject()` - Sync single object
- Uses `jiraAssetsClient.getAllObjectsOfType()` for fetching
- Uses `normalizedCacheStore.batchUpsertObjects()` for storage
**Flow:**
1. Fetch objects from Jira via `jiraAssetsClient`
2. Parse objects via `jiraAssetsClient.parseObject()`
3. Store objects via `normalizedCacheStore.batchUpsertObjects()`
4. Extract relations via `normalizedCacheStore.extractAndStoreRelations()`
#### 4. DB Normalization Store (EAV)
**Primary Files:**
- `services/normalizedCacheStore.ts` (~1695 lines)
- **Storage:** `normalizeObject()`, `batchUpsertObjects()`, `upsertObject()`
- **Retrieval:** `getObject()`, `getObjects()`, `reconstructObject()`, `loadAttributeValues()`
- **Relations:** `extractAndStoreRelations()`, `getRelatedObjects()`, `getReferencingObjects()`
- **Query:** `queryWithFilters()` (uses `queryBuilder`)
- `services/database/normalized-schema.ts`
- Defines EAV schema: `objects`, `attributes`, `attribute_values`, `object_relations`
**EAV Pattern:**
- `objects` table: Minimal metadata (id, objectKey, label, type, timestamps)
- `attributes` table: Schema metadata (jira_attr_id, field_name, type, is_multiple, etc.)
- `attribute_values` table: Actual values (text_value, number_value, boolean_value, reference_object_id, array_index)
- `object_relations` table: Extracted relationships (source_id, target_id, attribute_id)
#### 5. Backend API Endpoints
**Primary Files:**
- `routes/applications.ts` - Application-specific endpoints
- `POST /applications/search` - Search with filters
- `GET /applications/:id` - Get application details
- `PUT /applications/:id` - Update application
- `GET /applications/:id/related/:type` - Get related objects
- Dashboard endpoints (`/team-dashboard`, `/team-portfolio-health`)
- `routes/objects.ts` - Generic object endpoints
- `GET /objects` - List supported types
- `GET /objects/:type` - Get all objects of type
- `GET /objects/:type/:id` - Get single object
- `GET /objects/:type/:id/related/:relationType` - Get related objects
- `routes/cache.ts` - Cache management
- `POST /cache/sync` - Trigger full sync
- `POST /cache/sync/:objectType` - Sync single type
- `POST /cache/refresh-application/:id` - Refresh single object
- `routes/schema.ts` - Schema endpoints
- `GET /schema` - Get schema metadata
- `GET /schema/types` - List object types
- `GET /schema/types/:type` - Get type definition
**Service Layer:**
- Routes delegate to `cmdbService`, `dataService`, `syncEngine`
- `cmdbService` provides unified interface (read/write with conflict detection)
- `dataService` provides application-specific business logic
#### 6. Query Builder (Object Reconstruction)
**Primary Files:**
- `services/queryBuilder.ts`
- `buildWhereClause()` - Builds WHERE conditions from filters
- `buildFilterCondition()` - Handles different attribute types (text, reference, number, etc.)
- `buildOrderBy()` - ORDER BY clause
- `buildPagination()` - LIMIT/OFFSET clause
**Usage:**
- Used by `normalizedCacheStore.queryWithFilters()` to build dynamic SQL
- Handles complex filters (exact match, exists, contains, reference filters)
#### 7. Generated Types/Reflect Scripts
**Primary Files:**
- `scripts/generate-types-from-db.ts`
- Reads from `object_types` and `attributes` tables
- Generates `generated/jira-types.ts` (TypeScript interfaces)
- Generates `generated/jira-schema.ts` (Schema metadata with lookup maps)
**Generated Output:**
- `jira-types.ts`: TypeScript interfaces for each object type (e.g., `ApplicationComponent`, `Server`)
- `jira-schema.ts`: `OBJECT_TYPES` record, lookup maps (`TYPE_ID_TO_NAME`, `JIRA_NAME_TO_TYPE`), helper functions
---
## Pain Points & Duplication
### 1. Dual API Clients (jiraAssets.ts vs jiraAssetsClient.ts)
**Issue:** Two separate services handling Jira API calls:
- `jiraAssetsClient.ts` - Low-level, focused on API communication
- `jiraAssets.ts` - High-level, contains business logic + API calls
**Problems:**
- Duplication of API request logic
- Inconsistent error handling
- Mixed concerns (business logic + infrastructure)
- `jiraAssets.ts` is huge (~3454 lines) and hard to maintain
**Location:**
- `backend/src/services/jiraAssets.ts` - Contains both API calls and business logic
- `backend/src/services/jiraAssetsClient.ts` - Clean separation but incomplete
### 2. Schema Discovery/Caching Duplication
**Issue:** Multiple services handling schema metadata:
- `schemaDiscoveryService.ts` - Discovers and stores schema
- `schemaCacheService.ts` - Caches schema from DB
- `schemaConfigurationService.ts` - Manages enabled types
- `schemaMappingService.ts` - Maps types to schema IDs
**Problems:**
- Unclear boundaries between services
- Potential for stale cache
- Complex initialization dependencies
**Location:**
- `backend/src/services/schema*.ts` files
### 3. Mixed Responsibilities in normalizedCacheStore.ts
**Issue:** Large file (~1695 lines) handling multiple concerns:
- Database operations (EAV storage/retrieval)
- Object reconstruction (TypeScript object building)
- Reference resolution (fetching missing referenced objects)
- Relation extraction
**Problems:**
- Hard to test individual concerns
- Difficult to optimize specific operations
- Violates single responsibility principle
**Location:**
- `backend/src/services/normalizedCacheStore.ts`
### 4. Application-Specific Logic in Generic Services
**Issue:** Application-specific business logic scattered:
- `routes/applications.ts` - Application-specific endpoints (~780 lines)
- `services/dataService.ts` - Application business logic
- `services/jiraAssets.ts` - Application aggregation logic
- `services/cmdbService.ts` - Generic service used by applications
**Problems:**
- Hard to extend to other object types
- Tight coupling between routes and services
- Business logic mixed with data access
**Location:**
- `backend/src/routes/applications.ts`
- `backend/src/services/dataService.ts`
### 5. Type Generation Pipeline Complexity
**Issue:** Multiple scripts and services involved in type generation:
- `scripts/discover-schema.ts` - Triggers schema discovery
- `services/schemaDiscoveryService.ts` - Discovers schema
- `scripts/generate-types-from-db.ts` - Generates TypeScript files
- `generated/jira-types.ts` - Generated output (must be regenerated when schema changes)
**Problems:**
- Unclear workflow
- Manual steps required
- Generated files can get out of sync
**Location:**
- `backend/scripts/discover-schema.ts`
- `backend/scripts/generate-types-from-db.ts`
- `backend/src/generated/*.ts`
### 6. Legacy Code (cacheStore.old.ts)
**Issue:** Old cache store still present in codebase:
- `services/cacheStore.old.ts` - Deprecated implementation
**Problems:**
- Confusing for new developers
- Takes up space
- No longer used
**Location:**
- `backend/src/services/cacheStore.old.ts`
### 7. Inconsistent Error Handling
**Issue:** Different error handling patterns across services:
- Some use try/catch with logger
- Some throw errors
- Some return null/undefined
- Inconsistent error messages
**Problems:**
- Hard to debug issues
- Inconsistent API responses
- No centralized error handling
**Location:**
- Throughout codebase
---
## Target Architecture
### Domain/Infrastructure/Services/API Separation
```
backend/src/
├── domain/ # Domain models & business logic
│ ├── cmdb/
│ │ ├── Object.ts # CMDBObject base interface
│ │ ├── ObjectType.ts # ObjectTypeDefinition
│ │ ├── Attribute.ts # AttributeDefinition
│ │ └── Reference.ts # ObjectReference
│ ├── schema/
│ │ ├── Schema.ts # Schema domain model
│ │ └── SchemaDiscovery.ts # Schema discovery business logic
│ └── sync/
│ ├── SyncEngine.ts # Sync orchestration logic
│ └── SyncStrategy.ts # Sync strategies (full, incremental)
├── infrastructure/ # External integrations & infrastructure
│ ├── jira/
│ │ ├── JiraAssetsClient.ts # Low-level HTTP client (pure API calls)
│ │ ├── JiraAssetsApi.ts # API contract definitions
│ │ └── JiraResponseParser.ts # Response parsing utilities
│ └── database/
│ ├── adapters/ # Database adapters (Postgres, SQLite)
│ ├── schema/ # Schema definitions
│ └── migrations/ # Database migrations
├── services/ # Application services (use cases)
│ ├── cmdb/
│ │ ├── CmdbReadService.ts # Read operations
│ │ ├── CmdbWriteService.ts # Write operations with conflict detection
│ │ └── CmdbQueryService.ts # Query operations
│ ├── schema/
│ │ ├── SchemaService.ts # Schema CRUD operations
│ │ └── SchemaDiscoveryService.ts # Schema discovery orchestration
│ └── sync/
│ └── SyncService.ts # Sync orchestration
├── repositories/ # Data access layer
│ ├── ObjectRepository.ts # Object CRUD (uses EAV store)
│ ├── AttributeRepository.ts # Attribute value access
│ ├── RelationRepository.ts # Relationship access
│ └── SchemaRepository.ts # Schema metadata access
├── stores/ # Storage implementations
│ ├── NormalizedObjectStore.ts # EAV pattern implementation
│ ├── ObjectReconstructor.ts # Object reconstruction from EAV
│ └── RelationExtractor.ts # Relation extraction logic
├── api/ # HTTP API layer
│ ├── routes/
│ │ ├── objects.ts # Generic object endpoints
│ │ ├── schema.ts # Schema endpoints
│ │ └── sync.ts # Sync endpoints
│ ├── handlers/ # Request handlers (thin layer)
│ │ ├── ObjectHandler.ts
│ │ ├── SchemaHandler.ts
│ │ └── SyncHandler.ts
│ └── middleware/ # Auth, validation, etc.
├── queries/ # Query builders
│ ├── ObjectQueryBuilder.ts # SQL query construction
│ └── FilterBuilder.ts # Filter condition builder
└── scripts/ # CLI tools
├── discover-schema.ts # Schema discovery CLI
└── generate-types.ts # Type generation CLI
```
### Key Principles
1. **Domain Layer**: Pure business logic, no infrastructure dependencies
2. **Infrastructure Layer**: External integrations (Jira API, database)
3. **Services Layer**: Orchestrates domain logic and infrastructure
4. **Repository Layer**: Data access abstraction
5. **Store Layer**: Storage implementations (EAV, caching)
6. **API Layer**: Thin HTTP handlers that delegate to services
---
## Migration Steps
### Step 1: Extract Jira API Client (Infrastructure)
**Goal:** Create pure infrastructure client with no business logic
1. Consolidate `jiraAssetsClient.ts` and `jiraAssets.ts` API methods into single `JiraAssetsClient`
2. Extract API contract types to `infrastructure/jira/JiraAssetsApi.ts`
3. Move response parsing to `infrastructure/jira/JiraResponseParser.ts`
4. Remove business logic from API client (delegate to services)
**Files to Create:**
- `infrastructure/jira/JiraAssetsClient.ts`
- `infrastructure/jira/JiraAssetsApi.ts`
- `infrastructure/jira/JiraResponseParser.ts`
**Files to Modify:**
- `services/jiraAssets.ts` - Remove API calls, keep business logic
- `services/jiraAssetsClient.ts` - Merge into infrastructure client
**Files to Delete:**
- None (yet - deprecate old files after migration)
### Step 2: Extract Schema Domain & Services
**Goal:** Separate schema discovery business logic from infrastructure
1. Create `domain/schema/` with domain models
2. Move schema discovery logic to `services/schema/SchemaDiscoveryService.ts`
3. Consolidate schema caching in `services/schema/SchemaService.ts`
4. Remove duplication between `schemaCacheService`, `schemaConfigurationService`, `schemaMappingService`
**Files to Create:**
- `domain/schema/Schema.ts`
- `services/schema/SchemaService.ts`
- `services/schema/SchemaDiscoveryService.ts`
**Files to Modify:**
- `services/schemaDiscoveryService.ts` - Split into domain + service
- `services/schemaCacheService.ts` - Merge into SchemaService
- `services/schemaConfigurationService.ts` - Merge into SchemaService
- `services/schemaMappingService.ts` - Merge into SchemaService
**Files to Delete:**
- `services/schemaCacheService.ts` (after merge)
- `services/schemaMappingService.ts` (after merge)
### Step 3: Extract Repository Layer
**Goal:** Abstract data access from business logic
1. Create `repositories/ObjectRepository.ts` - Interface for object CRUD
2. Create `repositories/AttributeRepository.ts` - Interface for attribute access
3. Create `repositories/RelationRepository.ts` - Interface for relationships
4. Implement repositories using `NormalizedObjectStore`
**Files to Create:**
- `repositories/ObjectRepository.ts`
- `repositories/AttributeRepository.ts`
- `repositories/RelationRepository.ts`
- `repositories/SchemaRepository.ts`
**Files to Modify:**
- `services/normalizedCacheStore.ts` - Extract repository implementations
### Step 4: Extract Store Implementations
**Goal:** Separate storage implementations from business logic
1. Extract EAV storage to `stores/NormalizedObjectStore.ts`
2. Extract object reconstruction to `stores/ObjectReconstructor.ts`
3. Extract relation extraction to `stores/RelationExtractor.ts`
**Files to Create:**
- `stores/NormalizedObjectStore.ts` - EAV storage/retrieval
- `stores/ObjectReconstructor.ts` - TypeScript object reconstruction
- `stores/RelationExtractor.ts` - Relation extraction from objects
**Files to Modify:**
- `services/normalizedCacheStore.ts` - Split into store classes
### Step 5: Extract Query Builders
**Goal:** Separate query construction from execution
1. Move `queryBuilder.ts` to `queries/ObjectQueryBuilder.ts`
2. Extract filter building to `queries/FilterBuilder.ts`
**Files to Create:**
- `queries/ObjectQueryBuilder.ts`
- `queries/FilterBuilder.ts`
**Files to Modify:**
- `services/queryBuilder.ts` - Move to queries/
### Step 6: Extract CMDB Services
**Goal:** Separate read/write/query concerns
1. Create `services/cmdb/CmdbReadService.ts` - Read operations
2. Create `services/cmdb/CmdbWriteService.ts` - Write operations with conflict detection
3. Create `services/cmdb/CmdbQueryService.ts` - Query operations
**Files to Create:**
- `services/cmdb/CmdbReadService.ts`
- `services/cmdb/CmdbWriteService.ts`
- `services/cmdb/CmdbQueryService.ts`
**Files to Modify:**
- `services/cmdbService.ts` - Split into read/write/query services
### Step 7: Extract Sync Service
**Goal:** Separate sync orchestration from storage
1. Create `domain/sync/SyncEngine.ts` - Sync business logic
2. Create `services/sync/SyncService.ts` - Sync orchestration
**Files to Create:**
- `domain/sync/SyncEngine.ts`
- `services/sync/SyncService.ts`
**Files to Modify:**
- `services/syncEngine.ts` - Split into domain + service
### Step 8: Refactor API Routes
**Goal:** Thin HTTP handlers delegating to services
1. Create `api/handlers/` directory
2. Move route logic to handlers
3. Routes become thin wrappers around handlers
**Files to Create:**
- `api/handlers/ObjectHandler.ts`
- `api/handlers/SchemaHandler.ts`
- `api/handlers/SyncHandler.ts`
**Files to Modify:**
- `routes/applications.ts` - Extract handlers
- `routes/objects.ts` - Extract handlers
- `routes/cache.ts` - Extract handlers
- `routes/schema.ts` - Extract handlers
### Step 9: Clean Up Legacy Code
**Goal:** Remove deprecated files
**Files to Delete:**
- `services/cacheStore.old.ts`
- Deprecated service files after migration complete
### Step 10: Update Type Generation
**Goal:** Simplify type generation workflow
1. Consolidate type generation logic
2. Add automatic type generation on schema discovery
3. Update documentation
**Files to Modify:**
- `scripts/generate-types-from-db.ts` - Enhance with auto-discovery
- `scripts/discover-schema.ts` - Auto-generate types after discovery
---
## Explicit Deletion List
### Phase 2 (After Migration Complete)
1. **`backend/src/services/cacheStore.old.ts`**
- Reason: Legacy implementation, replaced by `normalizedCacheStore.ts`
- Deprecation date: TBD
2. **`backend/src/services/jiraAssets.ts`** (after extracting business logic)
- Reason: API calls moved to infrastructure layer, business logic to services
- Replacement: `infrastructure/jira/JiraAssetsClient.ts` + `services/cmdb/Cmdb*Service.ts`
3. **`backend/src/services/schemaCacheService.ts`** (after consolidation)
- Reason: Merged into `services/schema/SchemaService.ts`
4. **`backend/src/services/schemaMappingService.ts`** (after consolidation)
- Reason: Merged into `services/schema/SchemaService.ts`
5. **`backend/scripts/generate-schema.ts`** (if still present)
- Reason: Replaced by `generate-types-from-db.ts`
### Notes
- Keep old files until migration is complete and tested
- Mark as deprecated with `@deprecated` JSDoc comments
- Add migration guide for each deprecated file
---
## API Payload Contract & Recursion Insights
### Jira Assets API Payload Structure
The Jira Assets API returns objects with the following nested structure:
```typescript
interface JiraAssetsSearchResponse {
objectEntries: JiraAssetsObject[]; // Top-level array of objects
// ... pagination metadata
}
interface JiraAssetsObject {
id: number;
objectKey: string;
label: string;
objectType: {
id: number;
name: string;
};
attributes: JiraAssetsAttribute[]; // Array of attributes
updated?: string;
created?: string;
}
interface JiraAssetsAttribute {
objectTypeAttributeId: number;
objectTypeAttribute?: {
id: number;
name: string;
};
objectAttributeValues: Array<{ // Union type of value representations
value?: string; // For scalar values (text, number, etc.)
displayValue?: string; // Human-readable value
referencedObject?: { // For reference attributes
id: number;
objectKey: string;
label: string;
// ⚠️ CRITICAL: referencedObject may include attributes (level 2)
attributes?: JiraAssetsAttribute[]; // Recursive structure
};
status?: { // For status attributes
name: string;
};
// ... other type-specific fields
}>;
}
```
### Key Insights
#### 1. Recursive Structure
**Issue:** `referencedObject` may include `attributes[]` array (level 2 recursion).
**Current Handling:**
- `jiraAssetsClient.ts` uses `includeAttributesDeep=2` parameter
- This causes referenced objects to include their attributes
- Referenced objects' attributes may themselves contain referenced objects (level 3, 4, etc.)
- **Cycles are possible** (Object A references Object B, Object B references Object A)
**Current Code Location:**
- `backend/src/services/jiraAssetsClient.ts:222` - `includeAttributesDeep=2`
- `backend/src/services/jiraAssetsClient.ts:259-260` - Search with deep attributes
- `backend/src/services/jiraAssetsClient.ts:285-286` - POST search with deep attributes
**Impact:**
- Response payloads can be very large (deeply nested)
- Memory usage increases with depth
- Parsing becomes more complex
#### 2. Shallow Referenced Objects
**Issue:** When `attributes[]` is absent on a shallow `referencedObject`, **do not wipe attributes**.
**Current Behavior:**
- Some code paths may clear attributes if `attributes` is missing
- This is incorrect - absence of `attributes` array does not mean "no attributes"
- It simply means "attributes not included in this response"
**Critical Rule:**
```typescript
// ❌ WRONG: Don't do this
if (!referencedObject.attributes) {
referencedObject.attributes = []; // This wipes existing attributes!
}
// ✅ CORRECT: Preserve existing attributes if missing from response
if (referencedObject.attributes === undefined) {
// Don't modify - attributes simply not included in this response
// Keep any existing attributes that were previously loaded
}
```
**Code Locations to Review:**
- `backend/src/services/jiraAssetsClient.ts:parseObject()` - Object parsing
- `backend/src/services/jiraAssetsClient.ts:parseAttributeValue()` - Reference parsing
- `backend/src/services/normalizedCacheStore.ts:loadAttributeValues()` - Reference reconstruction
#### 3. Attribute Values Union Type
**Issue:** `objectAttributeValues` is a union type - different value representations based on attribute type.
**Value Types:**
- Scalar (text, number, boolean): `{ value?: string, displayValue?: string }`
- Reference: `{ referencedObject?: { id, objectKey, label, attributes? } }`
- Status: `{ status?: { name: string } }`
- Date/Datetime: `{ value?: string }` (ISO string)
**Current Handling:**
- `jiraAssetsClient.ts:parseAttributeValue()` uses switch on `attrDef.type`
- Different parsing logic for each type
- Reference types extract `referencedObject`, others use `value` or `displayValue`
**Code Location:**
- `backend/src/services/jiraAssetsClient.ts:521-628` - `parseAttributeValue()` method
#### 4. Cycles and Recursion Depth
**Issue:** Recursive references can create cycles.
**Examples:**
- Application A references Team X
- Team X references Application A (via some attribute)
- This creates a cycle at depth 2
**Current Handling:**
- No explicit cycle detection
- `includeAttributesDeep=2` limits depth but doesn't prevent cycles
- Potential for infinite loops during reconstruction
**Recommendation:**
- Add cycle detection during object reconstruction
- Use visited set to track processed object IDs
- Limit recursion depth explicitly (not just via API parameter)
**Code Locations:**
- `backend/src/services/normalizedCacheStore.ts:loadAttributeValues()` - Reference resolution
- `backend/src/services/normalizedCacheStore.ts:reconstructObject()` - Object reconstruction
### Refactoring Considerations
1. **Create dedicated parser module** for handling recursive payloads
2. **Add cycle detection** utility
3. **Separate shallow vs deep parsing** logic
4. **Preserve attribute state** when attributes array is absent
5. **Document recursion depth limits** clearly
---
## Appendix: Module Dependency Graph
```
API Routes
(applications.ts, objects.ts, cache.ts, schema.ts)
        │
        ▼
Application Services
(cmdbService.ts, dataService.ts)
        │
   ┌────┴──────────────────┐
   ▼                       ▼
Sync Engine      Normalized Cache Store (EAV Pattern)
   │                       │
   │                       ▼
   │                 Query Builder
   ▼
Jira Assets Client
(jiraAssetsClient.ts, jiraAssets.ts)
   │
   ▼
Schema Services
(schemaDiscovery, schemaCache, etc.)
```
---
## Next Steps (Phase 2)
1. Review and approve this plan
2. Create detailed task breakdown for each migration step
3. Set up feature branch for refactoring
4. Implement changes incrementally with tests
5. Update documentation as we go
---
**End of Phase 1 - Analysis Document**

View File

@@ -71,11 +71,11 @@ done
echo "✅ PostgreSQL is ready"
echo ""
# Create databases (if needed)
echo "📊 Creating databases..."
docker-compose -f "$COMPOSE_FILE" exec -T postgres psql -U cmdb -c "CREATE DATABASE cmdb_cache;" 2>/dev/null || echo " Database cmdb_cache already exists or will be created automatically"
docker-compose -f "$COMPOSE_FILE" exec -T postgres psql -U cmdb -c "CREATE DATABASE cmdb_classifications;" 2>/dev/null || echo " Database cmdb_classifications already exists or will be created automatically"
echo "✅ Databases ready"
# Create database (if needed)
echo "📊 Creating database..."
echo " Note: Single database is used by default (contains all tables)"
docker-compose -f "$COMPOSE_FILE" exec -T postgres psql -U cmdb -c "CREATE DATABASE cmdb_insight;" 2>/dev/null || echo " Database cmdb_insight already exists or will be created automatically"
echo "✅ Database ready"
echo ""
echo "✨ PostgreSQL database has been reset!"

130
scripts/setup-postgresql.sh Executable file
View File

@@ -0,0 +1,130 @@
#!/bin/bash
# Azure PostgreSQL Setup Script for CMDB Insight
# Creates PostgreSQL Flexible Server and configures it for production.
#
# Steps:
#   1. Generate a random admin password
#   2. Create the Flexible Server
#   3. Create the application database (cmdb_insight)
#   4. Open the firewall for Azure services
#   5. Store credentials in Key Vault
#   6. Point the backend App Service at the new database
#
# Prerequisites: Azure CLI (az) logged in with permissions on the
# resource group, Key Vault and App Service configured below.

# Exit on error, on use of an unset variable, and on failures inside pipes.
set -euo pipefail

# Configuration
RESOURCE_GROUP="zdl-cmdb-insight-prd-euwe-rg"
SERVER_NAME="zdl-cmdb-insight-prd-psql"
ADMIN_USER="cmdbadmin"
LOCATION="westeurope"
KEY_VAULT="zdl-cmdb-insight-prd-kv"
BACKEND_APP_NAME="zdl-cmdb-insight-prd-backend-webapp"

echo "🐘 Setting up Azure PostgreSQL for CMDB Insight..."
echo ""

# Step 1: Generate secure password
echo "🔐 Step 1: Generating secure password..."
ADMIN_PASSWORD=$(openssl rand -base64 32)
echo "✅ Password generated (will be stored in Key Vault)"
echo ""

# Step 2: Create PostgreSQL Flexible Server
# NOTE: with 'set -e' active, a post-hoc '[ $? -eq 0 ]' check is dead code
# (the script would already have exited on failure), so the command is
# tested directly in the 'if' condition instead.
echo "📦 Step 2: Creating PostgreSQL Flexible Server..."
if az postgres flexible-server create \
    --resource-group "$RESOURCE_GROUP" \
    --name "$SERVER_NAME" \
    --location "$LOCATION" \
    --admin-user "$ADMIN_USER" \
    --admin-password "$ADMIN_PASSWORD" \
    --sku-name Standard_B1ms \
    --tier Burstable \
    --storage-size 32 \
    --version 15 \
    --public-access 0.0.0.0 \
    --high-availability Disabled \
    --output none; then
    echo "✅ PostgreSQL server created: ${SERVER_NAME}.postgres.database.azure.com"
else
    echo "❌ Failed to create PostgreSQL server"
    exit 1
fi

# Step 3: Create database
echo ""
echo "📊 Step 3: Creating database..."
echo "   Note: Single database is used by default (contains all tables)"
az postgres flexible-server db create \
    --resource-group "$RESOURCE_GROUP" \
    --server-name "$SERVER_NAME" \
    --database-name cmdb_insight \
    --output none
echo "✅ Database created: cmdb_insight"

# Step 4: Configure firewall (allow Azure services)
# The special 0.0.0.0-0.0.0.0 range means "Azure-internal services",
# not the public internet.
echo ""
echo "🔥 Step 4: Configuring firewall rules..."
az postgres flexible-server firewall-rule create \
    --resource-group "$RESOURCE_GROUP" \
    --name "$SERVER_NAME" \
    --rule-name AllowAzureServices \
    --start-ip-address 0.0.0.0 \
    --end-ip-address 0.0.0.0 \
    --output none
echo "✅ Firewall rule created (allows Azure services)"

# Step 5: Store credentials in Key Vault
echo ""
echo "🔐 Step 5: Storing credentials in Key Vault..."
az keyvault secret set \
    --vault-name "$KEY_VAULT" \
    --name DatabasePassword \
    --value "$ADMIN_PASSWORD" \
    --output none

# Create connection string (Azure PostgreSQL requires SSL, hence sslmode=require)
CONNECTION_STRING="postgresql://${ADMIN_USER}:${ADMIN_PASSWORD}@${SERVER_NAME}.postgres.database.azure.com:5432/cmdb_insight?sslmode=require"
az keyvault secret set \
    --vault-name "$KEY_VAULT" \
    --name DatabaseUrl \
    --value "$CONNECTION_STRING" \
    --output none
echo "✅ Credentials stored in Key Vault"

# Step 6: Configure App Service app settings
echo ""
echo "⚙️ Step 6: Configuring App Service app settings..."

# Get Key Vault URL (vaultUri ends with a trailing slash, so the secret
# path below is concatenated without an extra '/')
KV_URL=$(az keyvault show --name "$KEY_VAULT" --query properties.vaultUri -o tsv)

# Configure database settings; the password is resolved at runtime via an
# App Service Key Vault reference, so it never appears in app settings.
az webapp config appsettings set \
    --name "$BACKEND_APP_NAME" \
    --resource-group "$RESOURCE_GROUP" \
    --settings \
    DATABASE_TYPE=postgres \
    DATABASE_HOST="${SERVER_NAME}.postgres.database.azure.com" \
    DATABASE_PORT=5432 \
    DATABASE_NAME=cmdb_insight \
    DATABASE_USER="$ADMIN_USER" \
    DATABASE_PASSWORD="@Microsoft.KeyVault(SecretUri=${KV_URL}secrets/DatabasePassword/)" \
    DATABASE_SSL=true \
    --output none
echo "✅ App settings configured"

# Summary
echo ""
echo "✅ PostgreSQL setup completed successfully!"
echo ""
echo "📋 Summary:"
echo "   Server: ${SERVER_NAME}.postgres.database.azure.com"
echo "   Admin User: $ADMIN_USER"
echo "   Database: cmdb_insight (single database for all data)"
echo "   Password: Stored in Key Vault ($KEY_VAULT)"
echo ""
echo "⚠️ Next Steps:"
echo "   1. Grant Key Vault access to App Service (if not done yet)"
echo "   2. Restart the backend app to connect to PostgreSQL:"
echo "      az webapp restart --name $BACKEND_APP_NAME --resource-group $RESOURCE_GROUP"
echo "   3. Check logs to verify connection:"
echo "      az webapp log tail --name $BACKEND_APP_NAME --resource-group $RESOURCE_GROUP"
echo ""