Compare commits
No commits in common. "dfef34576a01ab29428ee5e5d99d325d93747a47" and "854afd4c4154866fa9a7fd5034b803444bc23f6c" have entirely different histories.
dfef34576a...854afd4c41
@@ -1,66 +0,0 @@
# Dependencies
node_modules
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*

# Next.js build output
.next
out

# Production build
dist

# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# IDE and editor files
.vscode
.idea
*.swp
*.swo
*~

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Git
.git
.gitignore

# Docker
Dockerfile
.dockerignore
docker-compose.yml

# Documentation
README.md
*.md

# Logs
logs
*.log

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Coverage directory used by tools like istanbul
coverage

# Temporary folders
tmp
temp
@@ -33,12 +33,6 @@ Deployment:
 2. Dockerfile should be defined.
 3. docker-compose.yml should be defined.
-
-Private Docker Image Repo:
-http://192.168.2.212:3000/tigeren/
-
 
 Development Rules:
 1. Every time after making all the changes, run 'pnpm build' to verify that the changes compile correctly.
 2. Once debug logs are added, don't delete them until told to do so.
-3. When creating a guideline doc, place it under the docs folder in Markdown format.
Dockerfile
@@ -1,79 +0,0 @@
# Use official Node.js runtime as the base image
FROM node:22.18.0 AS base

# Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app

# Install build dependencies for native modules
RUN apt-get update && apt-get install -y \
    python3 \
    make \
    g++ \
    libsqlite3-dev \
    && rm -rf /var/lib/apt/lists/*

# Install pnpm globally
RUN npm install -g pnpm

# Copy package files and install all dependencies (including dev dependencies)
COPY package.json package-lock.json ./
RUN pnpm install

# Copy source code
COPY . .

# Rebuild better-sqlite3 to ensure native bindings are compiled correctly
RUN pnpm rebuild better-sqlite3 || npm rebuild better-sqlite3

# Verify native bindings are compiled
RUN find /app/node_modules -name "better_sqlite3.node" -type f

# Database file will be created at runtime via docker-compose

# Create directories for media storage
RUN mkdir -p /app/data /app/media

# Build the application
RUN pnpm buildprod

# Production image, copy all the files and run next
FROM base AS runner
WORKDIR /app

ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1

# No additional packages needed for production

RUN groupadd --system --gid 1001 nodejs
RUN useradd --system --uid 1001 --gid nodejs nextjs

# Create media directories
RUN mkdir -p /app/data /app/media

# Ensure directories have correct permissions
RUN chown -R nextjs:nodejs /app/data /app/media

# Copy built application
COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
# Copy node_modules to ensure native bindings are available
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules

# Rebuild native bindings for the production environment
RUN npm rebuild better-sqlite3

# Set up volume for persistent data
VOLUME ["/app/data", "/app/media"]

# Switch to non-root user
USER nextjs

EXPOSE 3000

ENV PORT=3000
ENV HOSTNAME="0.0.0.0"

CMD ["node", "server.js"]
@@ -1,57 +0,0 @@
version: '3.8'

services:
  nextav:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    volumes:
      - ./data:/app/data
      - ./media:/app/media
    environment:
      - NODE_ENV=production
      - DATABASE_URL=file:/app/data/nextav.db
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    depends_on:
      - ffmpeg

  # FFmpeg service for thumbnail generation (optional - can use host FFmpeg)
  ffmpeg:
    image: jrottenberg/ffmpeg:4.4-alpine
    volumes:
      - ./media:/media:ro
    command: tail -f /dev/null  # Keep container running
    restart: unless-stopped

  # Nginx reverse proxy (optional for production)
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
    depends_on:
      - nextav
    restart: unless-stopped
    profiles:
      - production

volumes:
  nextav_data:
    driver: local
  nextav_media:
    driver: local

networks:
  default:
    name: nextav-network

Binary file not shown.
@@ -1,32 +0,0 @@
version: '3.8'

services:
  nextav:
    image: ${REGISTRY_URL:-192.168.2.212:3000}/${IMAGE_NAME:-tigeren/nextav}:${IMAGE_TAG:-latest}
    container_name: nextav-app
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - ${DB_PATH:-./data}:/app/data
      - ${MEDIA_PATH:-./media}:/app/media
      - /mnt/data1:/mnt/data1
    command: node server.js
    environment:
      - NODE_ENV=production
      - NEXT_PUBLIC_MEDIA_ROOT=${NEXT_PUBLIC_MEDIA_ROOT:-/app/media}
      - DB_FILE=/app/data/media.db
    healthcheck:
      test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3000/api/health', (res) => { process.exit(res.statusCode === 200 ? 0 : 1) })\""]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - nextav-network

networks:
  nextav-network:
    driver: bridge
@@ -1,258 +0,0 @@
# NextAV Deployment Guide

This guide will help you deploy NextAV using Docker for a production-ready setup.

## Quick Start

### Prerequisites
- Docker & Docker Compose
- At least 2GB RAM and 10GB disk space
- FFmpeg (for thumbnail generation)

### 1. Clone and Setup
```bash
git clone <your-repo-url> nextav
cd nextav
```

### 2. Configure Environment
```bash
# Copy environment file
cp .env.example .env

# Edit .env with your settings
nano .env
```

### 3. Deploy with Docker
```bash
# Make deploy script executable
chmod +x deploy.sh

# Run deployment
./deploy.sh
```

### 4. Access NextAV
Open your browser to `http://localhost:3000`.

## Manual Deployment

### Using Docker Compose
```bash
# Build and start
docker-compose up -d --build

# View logs
docker-compose logs -f nextav

# Stop services
docker-compose down
```

### Production with SSL (Optional)
```bash
# For production with SSL
docker-compose --profile production up -d
```

## Configuration

### Environment Variables

| Variable | Description | Default |
|----------|-------------|---------|
| `DATABASE_URL` | SQLite database path | `file:./data/nextav.db` |
| `NODE_ENV` | Environment mode | `production` |
| `NEXT_PUBLIC_BASE_URL` | Base URL for the app | `http://localhost:3000` |
| `NEXT_PUBLIC_MEDIA_ROOT` | Media library root | `/app/media` |
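A minimal `.env` based on the variables above might look like the following; the values shown are illustrative defaults, not a required template:

```bash
# .env — illustrative values; adjust to your environment
NODE_ENV=production
DATABASE_URL=file:./data/nextav.db
NEXT_PUBLIC_BASE_URL=http://localhost:3000
NEXT_PUBLIC_MEDIA_ROOT=/app/media
```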
### Directory Structure
```
nextav/
├── data/                 # Database and app data
├── media/                # Media libraries
├── ssl/                  # SSL certificates (optional)
├── docker-compose.yml
├── Dockerfile
├── .env
└── deploy.sh
```

## Media Libraries Setup

### Adding Media Libraries
1. Create directories in the `media/` folder:
```bash
mkdir -p media/videos media/photos
```

2. Mount your existing media:
```bash
# Example: mount existing photo library
docker-compose down
# Edit docker-compose.yml to add your volume
docker-compose up -d
```

### Volume Mapping Examples
In `docker-compose.yml`:
```yaml
volumes:
  - /path/to/your/media:/app/media/yourlibrary
  - /path/to/another/library:/app/media/another
```

## Production Deployment

### SSL Certificates
```bash
# Create SSL directory
mkdir ssl

# Generate self-signed certificates (for testing)
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout ssl/key.pem -out ssl/cert.pem

# For production, use Let's Encrypt certificates
```

### Reverse Proxy (Nginx)
The production profile includes Nginx with:
- SSL termination
- Gzip compression
- Security headers
- WebSocket support

### Health Checks
The application includes health checks at `/api/health` which verify:
- Database connectivity
- Media directory accessibility
- Application responsiveness

A readiness script that polls this endpoint is sketched below.
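If you script your deployment, the health endpoint can gate follow-up steps. A minimal sketch (the endpoint path comes from above; the retry count and sleep interval are arbitrary choices):

```bash
# Wait up to ~2 minutes for NextAV to report healthy
for i in $(seq 1 24); do
  if curl -fsS http://localhost:3000/api/health > /dev/null; then
    echo "NextAV is healthy"
    break
  fi
  echo "Waiting for NextAV..."
  sleep 5
done
```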
## Monitoring

### View Logs
```bash
# All services
docker-compose logs -f

# Specific service
docker-compose logs -f nextav
```

### Service Status
```bash
docker-compose ps
docker stats
```

## Troubleshooting

### Common Issues

**Port 3000 already in use:**
```bash
# Check what's using port 3000
lsof -i :3000

# Use a different port (edit the ports mapping in docker-compose.yml), then rebuild
docker-compose up -d --scale nextav=1 --build
```

**Database permission issues:**
```bash
# Fix permissions
sudo chown -R 1001:1001 ./data
```

**FFmpeg not found:**
```bash
# Ensure FFmpeg is available
which ffmpeg
# Or use the Docker container
```

### Reset Everything
```bash
# Stop and remove containers
docker-compose down

# Remove volumes (WARNING: deletes data)
docker-compose down -v

# Rebuild and start
docker-compose up -d --build
```

## Updates

### Updating NextAV
```bash
# Pull latest changes
git pull origin main

# Rebuild and restart
docker-compose down
docker-compose up -d --build
```

### Database Updates
For database schema changes:
```bash
# Backup database
cp data/nextav.db data/nextav.db.backup

# Apply updates (if needed)
docker-compose down
docker-compose up -d
```

## Security

### Basic Security
- Uses a non-root user in the container
- Runs with minimal privileges
- Includes security headers in production

### Production Hardening
- Use proper SSL certificates
- Configure firewall rules
- Set up log rotation
- Use secrets management for sensitive data

## Performance Tuning

### Resource Limits
Edit `docker-compose.yml` to set limits:
```yaml
services:
  nextav:
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 2G
        reservations:
          cpus: '0.5'
          memory: 512M
```

### Database Optimization
- SQLite is optimized for read-heavy workloads
- Consider PostgreSQL for high-traffic sites
- Regular database maintenance is recommended (see the sketch below)
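For the maintenance point above, a periodic `VACUUM`/`ANALYZE` pass is one option. A sketch, assuming the `sqlite3` CLI is available inside the container (the default image does not install it, so you may need to add it or run the command against the mounted `./data` directory from the host):

```bash
# Reclaim free space and refresh query-planner statistics (illustrative)
docker-compose exec nextav sqlite3 /app/data/nextav.db "VACUUM; ANALYZE;"
```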
## Support

For issues and feature requests, please check:
1. Docker logs: `docker-compose logs`
2. Health endpoint: `curl http://localhost:3000/api/health`
3. System resources: `docker stats`

## Build/Push Docker image to private repo

Usage:
```bash
# Build & push to private registry
docker build -t 192.168.2.212:3000/tigeren/nextav:latest .
docker push 192.168.2.212:3000/tigeren/nextav:latest
```
@@ -1,218 +0,0 @@
# NextAV Deployment Guide

## Overview
NextAV is a Next.js application that provides media library management with SQLite database storage. This guide covers Docker deployment, troubleshooting, and best practices for the NextAV application.

## Prerequisites
- Docker and Docker Compose installed
- At least 4GB of available disk space
- Port 3000 available

## Quick Start

### 1. Clone and Setup
```bash
git clone <repository-url>
cd nextav
```

### 2. Environment Configuration
Create a `.env` file in the `docker` directory:
```bash
cd docker
cp .env.example .env
# Edit .env with your configuration
```

### 3. Create Required Directories
```bash
mkdir -p data media
chmod 755 data media
```

### 4. Build and Run
```bash
# Build the Docker image
docker build -t nextav:latest ..

# Start the application
docker compose up -d
```

### 5. Verify Deployment
```bash
# Check container status
docker compose ps

# Test health endpoint
curl http://localhost:3000/api/health

# Access the application
open http://localhost:3000
```

## Configuration

### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `DB_PATH` | `./data` | Path to database storage directory |
| `MEDIA_PATH` | `./media` | Path to media files directory |
| `DB_FILE` | `/app/data/media.db` | Database file path inside container |
| `NEXT_PUBLIC_MEDIA_ROOT` | `/app/media` | Media root path for the application |
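A starting point for `docker/.env`, matching the table above (the values are examples; point the paths at your actual host directories):

```bash
# docker/.env — example values
DB_PATH=./data
MEDIA_PATH=./media
NEXT_PUBLIC_MEDIA_ROOT=/app/media
```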
### Volume Mounts
- `./data:/app/data` - Database and application data
- `./media:/app/media` - Media files storage

## Troubleshooting

### Database Access Issues
**Problem**: `SqliteError: unable to open database file`

**Solution**:
1. Ensure the data directory exists and has correct permissions:
```bash
mkdir -p data
chmod 755 data
```

2. Fix container permissions:
```bash
docker exec -u root nextav-app chown -R nextjs:nodejs /app/data /app/media
```

3. Restart the container:
```bash
docker compose restart nextav
```

### Native Module Issues
**Problem**: `Could not locate the bindings file. Tried: ... better_sqlite3.node`

**Solution**:
1. Rebuild the Docker image:
```bash
docker build --no-cache -t nextav:latest ..
```

2. Ensure build dependencies are available in the Dockerfile

### Container Health Issues
**Problem**: Container shows as "unhealthy"

**Solution**:
1. Check logs:
```bash
docker compose logs nextav
```

2. Test the health endpoint manually:
```bash
curl http://localhost:3000/api/health
```

3. Verify database connectivity and permissions

### Build Performance Issues
**Problem**: Docker build takes too long

**Solutions**:
1. Use multi-stage builds (already implemented)
2. Remove unnecessary packages from the production stage
3. Use `.dockerignore` to exclude unnecessary files
4. Consider using the build cache
## Best Practices

### Security
- Run the container as a non-root user (implemented)
- Use specific image tags instead of `latest`
- Regularly update base images
- Scan images for vulnerabilities

### Performance
- Use multi-stage builds to reduce image size
- Implement proper caching strategies
- Monitor resource usage
- Use health checks for reliability

### Data Persistence
- Use named volumes for production
- Regular database backups
- Monitor disk space usage
- Implement proper backup strategies

### Monitoring
- Health check endpoint: `/api/health`
- Application logs: `docker compose logs nextav`
- Resource monitoring: `docker stats`

## Production Deployment

### 1. Environment Setup
```bash
# Production environment variables
NODE_ENV=production
DB_PATH=/var/lib/nextav/data
MEDIA_PATH=/var/lib/nextav/media
```

### 2. Reverse Proxy (Optional)
Configure Nginx or Traefik for SSL termination and load balancing.

### 3. Backup Strategy
```bash
# Database backup
docker exec nextav-app sqlite3 /app/data/media.db ".backup /app/data/backup.db"

# Volume backup
docker run --rm -v nextav_data:/data -v $(pwd):/backup alpine tar czf /backup/nextav_data.tar.gz -C /data .
```

### 4. Monitoring
- Set up log aggregation
- Monitor container health (see the `docker inspect` sketch below)
- Track resource usage
- Set up alerts for failures
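Beyond the application endpoint, the health status recorded by the container's own healthcheck can be polled directly. A small sketch (assumes the container name `nextav-app` from the compose file):

```bash
# Prints "healthy", "unhealthy", or "starting" based on the configured healthcheck
docker inspect --format '{{.State.Health.Status}}' nextav-app
```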
## Maintenance

### Regular Tasks
1. **Update Dependencies**: Monthly security updates
2. **Database Maintenance**: Regular backups and optimization (a backup sketch follows this list)
3. **Log Rotation**: Prevent disk space issues
4. **Image Updates**: Keep base images current
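One way to automate the backup task above is a scheduled job on the host that reuses the `.backup` command from the Backup Strategy section; the schedule, file naming, and retention below are illustrative choices:

```bash
# Nightly backup (illustrative): keep a dated copy of the SQLite database
docker exec nextav-app sqlite3 /app/data/media.db ".backup /app/data/backup-$(date +%F).db"

# Optional cleanup: drop backups older than 14 days from the mounted data directory
find ./data -name 'backup-*.db' -mtime +14 -delete
```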
### Troubleshooting Commands
```bash
# View logs
docker compose logs -f nextav

# Access container shell
docker exec -it nextav-app sh

# Check resource usage
docker stats nextav-app

# Restart services
docker compose restart nextav

# Update and rebuild
docker compose down
docker build -t nextav:latest ..
docker compose up -d
```

## Support

For issues not covered in this guide:
1. Check application logs
2. Review Docker documentation
3. Consult Next.js deployment guides
4. Open an issue in the project repository

---

**Last Updated**: August 30, 2025
**Version**: 1.0.0
@@ -1,30 +0,0 @@
# Image tagging conventions
https://docs.gitea.com/1.21/usage/packages/container

## Build an image with a tag
docker build -t {registry}/{owner}/{image}:{tag} .

## Tag an existing image
docker tag {some-existing-image}:{tag} {registry}/{owner}/{image}:{tag}

## Push an image
docker push {registry}/{owner}/{image}:{tag}

# Tag an existing image
docker tag magicdoc-magicdoc-api:latest 192.168.2.212:3000/tigeren/magicdoc-magicdoc-api

# Push
docker tag magicdoc-magicdoc-api:latest 192.168.2.212:3000/tigeren/magicdoc-magicdoc-api
docker push 192.168.2.212:3000/tigeren/magicdoc-magicdoc-api

docker tag backend-api:latest 192.168.2.212:3000/tigeren/backend-api
docker push 192.168.2.212:3000/tigeren/backend-api:latest

# Browse the pushed images
http://192.168.2.212:3000/tigeren/

# OrbStack configuration: Settings -> Docker
{
  "insecure-registries" : [
    "192.168.2.212:3000"
  ]
}
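On a plain Linux Docker host (rather than OrbStack), the equivalent setting goes into `/etc/docker/daemon.json`. A sketch, assuming no other daemon options are already configured there (this overwrites the file):

```bash
# Register the private registry as an insecure registry, then restart the daemon
sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
  "insecure-registries": ["192.168.2.212:3000"]
}
EOF
sudo systemctl restart docker
```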
@@ -1,7 +1,7 @@
 import type { NextConfig } from "next";
 
 const nextConfig: NextConfig = {
-  output: 'standalone',
+  /* config options here */
 };
 
 export default nextConfig;
@@ -5,7 +5,6 @@
   "scripts": {
     "dev": "next dev --turbopack",
     "build": "next build --turbopack",
-    "buildprod": "next build",
     "start": "next start"
   },
   "dependencies": {
@@ -1,5 +1,5 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function POST(request: Request, { params }: { params: Promise<{ id: string }> }) {
   const { id } = await params;
@@ -10,7 +10,6 @@ export async function POST(request: Request, { params }: { params: Promise<{ id:
     return NextResponse.json({ error: 'Invalid media ID' }, { status: 400 });
   }
 
-  const db = getDatabase();
   // Check if media exists
   const media = db.prepare(`
     SELECT id FROM media WHERE id = ?
@@ -56,7 +55,6 @@ export async function DELETE(request: Request, { params }: { params: Promise<{ i
     return NextResponse.json({ error: 'Invalid media ID' }, { status: 400 });
   }
 
-  const db = getDatabase();
   // Check if bookmark exists
   const bookmark = db.prepare(`
     SELECT id FROM bookmarks WHERE media_id = ?

@@ -1,5 +1,5 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function GET(request: Request) {
   const { searchParams } = new URL(request.url);
@@ -26,7+26,6 @@ export async function GET(request: Request) {
   }
 
   try {
-    const db = getDatabase();
     // Get total count for pagination
     const countQuery = `
       SELECT COUNT(*) as total
@@ -64,7 +63,6 @@ export async function GET(request: Request) {
 
 export async function POST(request: Request) {
   try {
-    const db = getDatabase();
     const { mediaId } = await request.json();
 
     if (!mediaId) {

@@ -2,7 +2,7 @@
 import { NextResponse } from "next/server";
 import fs from "fs";
 import path from "path";
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 const VIDEO_EXTENSIONS = ["mp4", "mkv", "avi", "mov", "wmv", "flv", "webm", "m4v"];
 const PHOTO_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff", "svg"];
@@ -16,7 +16,6 @@ export async function GET(request: Request) {
   }
 
   try {
-    const db = getDatabase();
     const files = fs.readdirSync(dirPath);
 
     // Get media files from database for this path
@@ -1,36 +0,0 @@
import { NextResponse } from 'next/server';
import { existsSync } from 'fs';
import path from 'path';
import { getDatabase } from '@/db';

export async function GET() {
  try {
    // Check if database is accessible by actually connecting to it
    try {
      const db = getDatabase();
      // Test a simple query
      db.prepare('SELECT 1').get();
    } catch (dbError) {
      return NextResponse.json(
        { status: 'unhealthy', error: `Database not accessible: ${(dbError as Error).message}` },
        { status: 503 }
      );
    }

    // Check if media directory is accessible
    const mediaRoot = process.env.NEXT_PUBLIC_MEDIA_ROOT || '/app/media';
    if (!existsSync(mediaRoot)) {
      return NextResponse.json(
        { status: 'unhealthy', error: 'Media directory not accessible' },
        { status: 503 }
      );
    }

    return NextResponse.json({ status: 'healthy' });
  } catch (error) {
    return NextResponse.json(
      { status: 'unhealthy', error: (error as Error).message },
      { status: 500 }
    );
  }
}
@@ -1,10 +1,9 @@
 
 import { NextRequest, NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function DELETE(request: NextRequest, { params: paramsPromise }: { params: Promise<{ id: string }> }) {
   const params = await paramsPromise;
-  const db = getDatabase();
   const id = parseInt(params.id, 10);
   if (isNaN(id)) {
     return NextResponse.json({ error: 'Invalid ID' }, { status: 400 });

@@ -1,9 +1,8 @@
 
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function GET() {
-  const db = getDatabase();
   const libraries = db.prepare('SELECT * FROM libraries').all();
   return NextResponse.json(libraries);
 }
@@ -15,7 +14,6 @@ export async function POST(request: Request) {
   }
 
   try {
-    const db = getDatabase();
     const info = db.prepare('INSERT INTO libraries (path) VALUES (?)').run(path);
     return NextResponse.json({ id: info.lastInsertRowid, path });
   } catch (error: any) {

@@ -1,5 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
-import { getDatabase } from "@/db";
+import db from "@/db";
 import fs from "fs";
 import path from "path";
 
@@ -8,7 +8,6 @@ export async function GET(
   { params }: { params: Promise<{ id: string }> }
 ) {
   const { id } = await params;
-  const db = getDatabase();
   try {
     const photoId = parseInt(id);
 

@@ -1,5 +1,5 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function GET(request: Request) {
   const { searchParams } = new URL(request.url);
@@ -25,7 +25,6 @@ export async function GET(request: Request) {
     params.push(`%${search}%`, `%${search}%`);
   }
 
-  const db = getDatabase();
   // Get total count for pagination
   const countQuery = `
     SELECT COUNT(*) as total

@@ -1,9 +1,8 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function DELETE(request: Request, { params }: { params: Promise<{ id: string }> }) {
   const { id } = await params;
-  const db = getDatabase();
   try {
     const parsedId = parseInt(id);
 

@@ -1,9 +1,8 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function GET(request: Request) {
   try {
-    const db = getDatabase();
     const { searchParams } = new URL(request.url);
     const mediaId = searchParams.get('mediaId');
 
@@ -34,7 +33,6 @@ export async function GET(request: Request) {
 
 export async function POST(request: Request) {
   try {
-    const db = getDatabase();
     const { mediaId, rating } = await request.json();
 
     if (!mediaId || !rating) {

@@ -1,5 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
-import { getDatabase } from "@/db";
+import db from "@/db";
 import fs from "fs";
 import path from "path";
 
@@ -8,7 +8,6 @@ export async function GET(
   { params }: { params: Promise<{ id: string }> }
 ) {
   const { id } = await params;
-  const db = getDatabase();
   try {
     const videoId = parseInt(id);
 

@@ -1,9 +1,8 @@
 import { NextResponse } from 'next/server';
-import { getDatabase } from '@/db';
+import db from '@/db';
 
 export async function GET(request: Request, { params }: { params: Promise<{ id: string }> }) {
   const { id } = await params;
-  const db = getDatabase();
   try {
     const parsedId = parseInt(id);
 

@@ -1,10 +1,9 @@
 
 import { NextResponse } from "next/server";
-import { getDatabase } from "@/db";
+import db from "@/db";
 
 export async function GET(request: Request) {
   const { searchParams } = new URL(request.url);
-  const db = getDatabase();
 
   const limit = Math.min(parseInt(searchParams.get('limit') || '50'), 100);
   const offset = parseInt(searchParams.get('offset') || '0');
@@ -1,33 +1,18 @@
 
-import Database, { Database as DatabaseType } from 'better-sqlite3';
+import Database from 'better-sqlite3';
 import path from 'path';
-import fs from 'fs';
-
-let db: DatabaseType | null = null;
-
-function initializeDatabase() {
-  if (db) return db;
-
-  // const dbPath = process.env.DB_FILE || path.join(process.cwd(), 'media.db');
-  const dbPath = path.join(process.cwd(), 'data', 'media.db');
-
-  // Ensure the data directory exists
-  const dataDir = path.dirname(dbPath);
-  if (!fs.existsSync(dataDir)) {
-    fs.mkdirSync(dataDir, { recursive: true });
-  }
-
-  db = new Database(dbPath);
-
-  // Create tables
-  db.exec(`
+
+const dbPath = path.join(process.cwd(), 'media.db');
+const db = new Database(dbPath);
+
+db.exec(`
   CREATE TABLE IF NOT EXISTS libraries (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     path TEXT NOT NULL UNIQUE
   );
 `);
@@ -42,9 +27,9 @@ function initializeDatabase() {
     created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
     FOREIGN KEY (library_id) REFERENCES libraries (id)
   );
 `);
 
 db.exec(`
   CREATE TABLE IF NOT EXISTS bookmarks (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     media_id INTEGER NOT NULL,
@@ -52,9 +37,9 @@ function initializeDatabase() {
     updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
     FOREIGN KEY (media_id) REFERENCES media(id) ON DELETE CASCADE
   );
 `);
 
 db.exec(`
   CREATE TABLE IF NOT EXISTS stars (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     media_id INTEGER NOT NULL,
@@ -63,29 +48,21 @@ function initializeDatabase() {
     updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
     FOREIGN KEY (media_id) REFERENCES media(id) ON DELETE CASCADE
   );
 `);
 
 // Create indexes for performance
 db.exec(`CREATE INDEX IF NOT EXISTS idx_bookmarks_media_id ON bookmarks(media_id);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_stars_media_id ON stars(media_id);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_bookmark_count ON media(bookmark_count);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_star_count ON media(star_count);`);
 
 // Pagination and filtering indexes
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_type_created_at ON media(type, created_at);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_path ON media(path);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_library_id ON media(library_id);`);
 
 // Full-text search indexes
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_title ON media(title);`);
 db.exec(`CREATE INDEX IF NOT EXISTS idx_media_type_path ON media(type, path);`);
 
-  return db;
-}
-
-export function getDatabase(): DatabaseType {
-  return initializeDatabase();
-}
-
-// For backward compatibility, export the database instance getter
-export default getDatabase;
+export default db;
@@ -1,4 +1,4 @@
-import { getDatabase } from "@/db";
+import db from "@/db";
 import { glob } from "glob";
 import path from "path";
 import fs from "fs";
@@ -34,7 +34,6 @@ const generatePhotoThumbnail = (photoPath: string, thumbnailPath: string) => {
 };
 
 const scanLibrary = async (library: { id: number; path: string }) => {
-  const db = getDatabase();
   // Scan videos - handle all case variations
   const videoFiles = await glob(`${library.path}/**/*.*`, { nodir: true });
 
@@ -119,7 +118,6 @@ const scanLibrary = async (library: { id: number; path: string }) => {
 };
 
 export const scanAllLibraries = async () => {
-  const db = getDatabase();
   const libraries = db.prepare("SELECT * FROM libraries").all() as { id: number; path: string }[];
   for (const library of libraries) {
     await scanLibrary(library);
@@ -127,7 +125,6 @@ export const scanAllLibraries = async () => {
 };
 
 export const scanSelectedLibrary = async (libraryId: number) => {
-  const db = getDatabase();
   const library = db.prepare("SELECT * FROM libraries WHERE id = ?").get(libraryId) as { id: number; path: string } | undefined;
   if (!library) {
     throw new Error(`Library with ID ${libraryId} not found`);