Compare commits


3 Commits

Author    SHA1        Message           Date
mmomjian  2d309e91da  change all refs   2025-04-22 17:43:12 -04:00
mmomjian  a2c0ecfc2a  translation keys  2025-04-22 12:16:00 -04:00
mmomjian  085f39dea6  fix               2025-04-22 11:45:52 -04:00
2791 changed files with 145731 additions and 287130 deletions

.devcontainer/.gitignore vendored Normal file (2 changes)

@@ -0,0 +1,2 @@
.env
library

.devcontainer/Dockerfile Normal file (16 changes)

@@ -0,0 +1,16 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:a20b8a3538313487ac9266875bbf733e544c1aa2091df2bb99ab592a6d4f7399
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.29.1"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
&& curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
&& tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
&& rm flutter.tar.xz \
&& chown -R 1000:1000 ${FLUTTER_HOME}
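A quick way to sanity-check the resulting image locally (a sketch; the tag name is illustrative, and flutter may warn when invoked as root):

docker build -t devcontainer-flutter-check .devcontainer
docker run --rm devcontainer-flutter-check flutter --version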

.devcontainer/devcontainer.json

@@ -1,67 +1,26 @@
{
"name": "Immich - Backend, Frontend and ML",
"service": "immich-server",
"runServices": [
"immich-server",
"redis",
"database",
"immich-machine-learning"
],
"name": "Immich",
"service": "immich-devcontainer",
"dockerComposeFile": [
"../docker/docker-compose.dev.yml",
"./server/container-compose-overrides.yml"
"docker-compose.yml",
"../docker/docker-compose.dev.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dbaeumer.vscode-eslint",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"svelte.svelte-vscode",
"ms-vscode-remote.remote-containers",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"rvest.vs-code-prettier-eslint",
"bluebrown.yamlfmt",
"vkrishna04.cspell-sync",
"vitest.explorer",
"ms-playwright.playwright",
"ms-azuretools.vscode-docker"
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [3000, 9231, 9230, 2283],
"portsAttributes": {
"3000": {
"label": "Immich - Frontend HTTP",
"description": "The frontend of the Immich project",
"onAutoForward": "openBrowserOnce"
},
"2283": {
"label": "Immich - API Server - HTTP",
"description": "The API server of the Immich project"
},
"9231": {
"label": "Immich - API Server - DEBUG",
"description": "The API server of the Immich project"
},
"9230": {
"label": "Immich - Workers - DEBUG",
"description": "The workers of the Immich project"
}
},
"forwardPorts": [],
"initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
"onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
"overrideCommand": true,
"workspaceFolder": "/workspaces/immich",
"remoteUser": "node",
"userEnvProbe": "loginInteractiveShell",
"remoteEnv": {
// The location where your uploaded files are stored
"UPLOAD_LOCATION": "${localEnv:UPLOAD_LOCATION:./library}",
// Connection secret for postgres. You should change it to a random password
// Please use only the characters `A-Za-z0-9`, without special characters or spaces
"DB_PASSWORD": "${localEnv:DB_PASSWORD:postgres}",
// The database username
"DB_USERNAME": "${localEnv:DB_USERNAME:postgres}",
// The database name
"DB_DATABASE_NAME": "${localEnv:DB_DATABASE_NAME:immich}"
}
"workspaceFolder": "/immich",
"remoteUser": "node"
}
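With the slimmed-down config above, the container can also be exercised outside VS Code through the Dev Containers CLI (a sketch; assumes @devcontainers/cli is installed and the commands run from the repository root):

npm install -g @devcontainers/cli
devcontainer up --workspace-folder .      # builds immich-devcontainer and starts the compose services
devcontainer exec --workspace-folder . flutter --version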

.devcontainer/docker-compose.yml Normal file

@@ -0,0 +1,8 @@
services:
immich-devcontainer:
build:
dockerfile: Dockerfile
extra_hosts:
- 'host.docker.internal:host-gateway'
volumes:
- ..:/immich:cached
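Because devcontainer.json layers this file with docker/docker-compose.dev.yml, the effective merged configuration can be previewed with Compose itself (sketch; run from the repository root):

docker compose \
  -f .devcontainer/docker-compose.yml \
  -f docker/docker-compose.dev.yml \
  config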

.devcontainer/mobile/container-compose-overrides.yml (deleted)

@@ -1,33 +0,0 @@
services:
immich-server:
build:
target: dev-container-mobile
environment:
- IMMICH_SERVER_URL=http://127.0.0.1:2283/
volumes: !override # bind mount host to /workspaces/immich
- ..:/workspaces/immich
- cli_node_modules:/workspaces/immich/cli/node_modules
- e2e_node_modules:/workspaces/immich/e2e/node_modules
- open_api_node_modules:/workspaces/immich/open-api/typescript-sdk/node_modules
- server_node_modules:/workspaces/immich/server/node_modules
- web_node_modules:/workspaces/immich/web/node_modules
- ${UPLOAD_LOCATION}/photos:/data
- /etc/localtime:/etc/localtime:ro
database:
volumes:
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
volumes:
# Node modules for each service to avoid conflicts and ensure consistent dependencies
cli_node_modules:
e2e_node_modules:
open_api_node_modules:
server_node_modules:
web_node_modules:
# UPLOAD_LOCATION must be set to an absolute path or vol-upload
vol-upload:
# DB_DATA_LOCATION must be set to an absolute path or vol-database
vol-database:

.devcontainer/mobile/devcontainer.json (deleted)

@@ -1,52 +0,0 @@
{
"name": "Immich - Mobile",
"service": "immich-server",
"runServices": [
"immich-server",
"redis",
"database",
"immich-machine-learning"
],
"dockerComposeFile": [
"../../docker/docker-compose.dev.yml",
"./container-compose-overrides.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint",
"esbenp.prettier-vscode",
"svelte.svelte-vscode",
"ms-vscode-remote.remote-containers",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"rvest.vs-code-prettier-eslint",
"bluebrown.yamlfmt",
"vkrishna04.cspell-sync",
"vitest.explorer",
"ms-playwright.playwright",
"ms-azuretools.vscode-docker"
]
}
},
"forwardPorts": [],
"overrideCommand": true,
"workspaceFolder": "/workspaces/immich",
"remoteUser": "node",
"userEnvProbe": "loginInteractiveShell",
"remoteEnv": {
// The location where your uploaded files are stored
"UPLOAD_LOCATION": "${localEnv:UPLOAD_LOCATION:./Library}",
// Connection secret for postgres. You should change it to a random password
// Please use only the characters `A-Za-z0-9`, without special characters or spaces
"DB_PASSWORD": "${localEnv:DB_PASSWORD:postgres}",
// The database username
"DB_USERNAME": "${localEnv:DB_USERNAME:postgres}",
// The database name
"DB_DATABASE_NAME": "${localEnv:DB_DATABASE_NAME:immich}"
}
}

.devcontainer/scripts/initializeCommand.sh Normal file

@@ -0,0 +1,6 @@
#!/bin/bash
# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
cp docker/example.env .devcontainer/.env
fi
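The hook is idempotent, so it is safe to run by hand when testing (same path the devcontainer invokes):

bash .devcontainer/scripts/initializeCommand.sh   # first run copies docker/example.env to .devcontainer/.env
bash .devcontainer/scripts/initializeCommand.sh   # later runs are a no-op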

.devcontainer/scripts/onCreateCommand.sh Normal file

@@ -0,0 +1,25 @@
#!/bin/bash
# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
sudo dpkg --add-architecture amd64 && \
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
qemu-user-static \
libc6:amd64 \
libstdc++6:amd64 \
libgcc1:amd64
fi
# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list
sudo apt-get update
sudo apt-get install -y dcm
dart --disable-analytics
# Install immich
cd /immich || exit
make install-all
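Once the hook has run, the DCM install can be spot-checked from inside the container (a sketch using only stock apt/shell commands):

apt-cache policy dcm   # should show https://dcm.dev/debian as the package source
command -v dcm         # confirms the binary is on PATH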

container-common.sh (deleted)

@@ -1,81 +0,0 @@
#!/bin/bash
export IMMICH_PORT="${DEV_SERVER_PORT:-2283}"
export DEV_PORT="${DEV_PORT:-3000}"
# search for immich directory inside workspace.
# /workspaces/immich is the bind mount, but other directories can be mounted if running
# Devcontainer: Clone [repository|pull request] in container volume
WORKSPACES_DIR="/workspaces"
IMMICH_DIR="$WORKSPACES_DIR/immich"
IMMICH_DEVCONTAINER_LOG="$HOME/immich-devcontainer.log"
log() {
# Display command on console, log with timestamp to file
echo "$*"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" >>"$IMMICH_DEVCONTAINER_LOG"
}
run_cmd() {
# Ensure log directory exists
mkdir -p "$(dirname "$IMMICH_DEVCONTAINER_LOG")"
log "$@"
# Execute command: display normally on console, log with timestamps to file
"$@" 2>&1 | tee >(while IFS= read -r line; do
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $line" >>"$IMMICH_DEVCONTAINER_LOG"
done)
# Preserve exit status
return "${PIPESTATUS[0]}"
}
# Find directories excluding /workspaces/immich
mapfile -t other_dirs < <(find "$WORKSPACES_DIR" -mindepth 1 -maxdepth 1 -type d ! -path "$IMMICH_DIR" ! -name ".*")
if [ ${#other_dirs[@]} -gt 1 ]; then
log "Error: More than one directory found in $WORKSPACES_DIR other than $IMMICH_DIR."
exit 1
elif [ ${#other_dirs[@]} -eq 1 ]; then
export IMMICH_WORKSPACE="${other_dirs[0]}"
else
export IMMICH_WORKSPACE="$IMMICH_DIR"
fi
log "Found immich workspace in $IMMICH_WORKSPACE"
log ""
fix_permissions() {
log "Fixing permissions for ${IMMICH_WORKSPACE}"
# Change ownership for directories that exist
for dir in "${IMMICH_WORKSPACE}/.vscode" \
"${IMMICH_WORKSPACE}/server/upload" \
"${IMMICH_WORKSPACE}/.pnpm-store" \
"${IMMICH_WORKSPACE}/.github/node_modules" \
"${IMMICH_WORKSPACE}/cli/node_modules" \
"${IMMICH_WORKSPACE}/e2e/node_modules" \
"${IMMICH_WORKSPACE}/open-api/typescript-sdk/node_modules" \
"${IMMICH_WORKSPACE}/server/node_modules" \
"${IMMICH_WORKSPACE}/server/dist" \
"${IMMICH_WORKSPACE}/web/node_modules" \
"${IMMICH_WORKSPACE}/web/dist"; do
if [ -d "$dir" ]; then
run_cmd sudo chown node -R "$dir"
fi
done
log ""
}
install_dependencies() {
log "Installing dependencies"
(
cd "${IMMICH_WORKSPACE}" || exit 1
export CI=1 FROZEN=1 OFFLINE=1
run_cmd make setup-web-dev setup-server-dev
)
log ""
}
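A minimal sketch of how these removed helpers composed (the sourced path and build target are illustrative):

source container-common.sh                 # hypothetical location of the helpers above
log "Building the server"
run_cmd pnpm --filter immich build || log "build failed with exit code $?"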

.devcontainer/server/container-compose-overrides.yml (deleted)

@@ -1,42 +0,0 @@
services:
immich-server:
build:
target: dev-container-server
env_file: !reset []
hostname: immich-dev
environment:
- IMMICH_SERVER_URL=http://127.0.0.1:2283/
volumes: !override
- ..:/workspaces/immich
- ${UPLOAD_LOCATION:-upload-devcontainer-volume}${UPLOAD_LOCATION:+/photos}:/data
- /etc/localtime:/etc/localtime:ro
- pnpm-store:/usr/src/app/.pnpm-store
- server-node_modules:/usr/src/app/server/node_modules
- web-node_modules:/usr/src/app/web/node_modules
- github-node_modules:/usr/src/app/.github/node_modules
- cli-node_modules:/usr/src/app/cli/node_modules
- docs-node_modules:/usr/src/app/docs/node_modules
- e2e-node_modules:/usr/src/app/e2e/node_modules
- sdk-node_modules:/usr/src/app/open-api/typescript-sdk/node_modules
- app-node_modules:/usr/src/app/node_modules
- sveltekit:/usr/src/app/web/.svelte-kit
- coverage:/usr/src/app/web/coverage
immich-web:
env_file: !reset []
immich-machine-learning:
env_file: !reset []
database:
env_file: !reset []
environment: !override
POSTGRES_PASSWORD: ${DB_PASSWORD-postgres}
POSTGRES_USER: ${DB_USERNAME-postgres}
POSTGRES_DB: ${DB_DATABASE_NAME-immich}
POSTGRES_INITDB_ARGS: '--data-checksums'
POSTGRES_HOST_AUTH_METHOD: md5
volumes:
- ${UPLOAD_LOCATION:-postgres-devcontainer-volume}${UPLOAD_LOCATION:+/postgres}:/var/lib/postgresql/data
redis:
env_file: !reset []
volumes:
upload-devcontainer-volume:
postgres-devcontainer-volume:

container-start-backend.sh (deleted)

@@ -1,22 +0,0 @@
#!/bin/bash
# shellcheck source=common.sh
# shellcheck disable=SC1091
source /immich-devcontainer/container-common.sh
log "Preparing Immich Nest API Server"
log ""
export CI=1
run_cmd pnpm --filter immich install
log "Starting Nest API Server"
log ""
cd "${IMMICH_WORKSPACE}/server" || (
log "Immich workspace not found"
exit 1
)
while true; do
run_cmd pnpm --filter immich exec nest start --debug "0.0.0.0:9230" --watch
log "Nest API Server crashed with exit code $?. Respawning in 3s ..."
sleep 3
done

container-start-frontend.sh (deleted)

@@ -1,29 +0,0 @@
#!/bin/bash
# shellcheck source=common.sh
# shellcheck disable=SC1091
source /immich-devcontainer/container-common.sh
export CI=1
log "Preparing Immich Web Frontend"
log ""
run_cmd pnpm --filter @immich/sdk install
run_cmd pnpm --filter @immich/sdk build
run_cmd pnpm --filter immich-web install
log "Starting Immich Web Frontend"
log ""
cd "${IMMICH_WORKSPACE}/web" || (
log "Immich Workspace not found"
exit 1
)
until curl --output /dev/null --silent --head --fail "http://127.0.0.1:${IMMICH_PORT}/api/server/config"; do
log "Waiting for api server..."
sleep 1
done
while true; do
run_cmd pnpm --filter immich-web exec vite dev --host 0.0.0.0 --port "${DEV_PORT}"
log "Web crashed with exit code $?. Respawning in 3s ..."
sleep 3
done


@@ -1,17 +0,0 @@
#!/bin/bash
# shellcheck source=common.sh
# shellcheck disable=SC1091
source /immich-devcontainer/container-common.sh
log "Setting up Immich dev container..."
fix_permissions
log "Setup complete, please wait while backend and frontend services automatically start"
log
log "If necessary, the services may be manually started using"
log
log "$ /immich-devcontainer/container-start-backend.sh"
log "$ /immich-devcontainer/container-start-frontend.sh"
log
log "From different terminal windows, as these scripts automatically restart the server"
log "on error, and will continuously run in a loop"

.dockerignore

@@ -1,41 +1,33 @@
.vscode/
.github/
.git/
.env*
*.log
*.tmp
*.temp
**/Dockerfile
**/node_modules/
**/.pnpm-store/
**/dist/
**/coverage/
**/build/
design/
docker/
!docker/scripts
docs/
!docs/package.json
!docs/package-lock.json
e2e/
!e2e/package.json
!e2e/package-lock.json
fastlane/
machine-learning/
misc/
mobile/
open-api/typescript-sdk/build/
!open-api/typescript-sdk/package.json
!open-api/typescript-sdk/package-lock.json
cli/coverage/
cli/dist/
cli/node_modules/
open-api/typescript-sdk/build/
open-api/typescript-sdk/node_modules/
server/coverage/
server/node_modules/
server/upload/
server/src/queries
server/dist/
server/www/
web/node_modules/
web/coverage/
web/.svelte-kit
web/build/
web/.env

.gitattributes vendored (9 changes)

@@ -9,15 +9,6 @@ mobile/lib/**/*.g.dart linguist-generated=true
mobile/lib/**/*.drift.dart -diff -merge
mobile/lib/**/*.drift.dart linguist-generated=true
mobile/drift_schemas/main/drift_schema_*.json -diff -merge
mobile/drift_schemas/main/drift_schema_*.json linguist-generated=true
mobile/lib/infrastructure/repositories/db.repository.steps.dart -diff -merge
mobile/lib/infrastructure/repositories/db.repository.steps.dart linguist-generated=true
mobile/test/drift/main/generated/** -diff -merge
mobile/test/drift/main/generated/** linguist-generated=true
open-api/typescript-sdk/fetch-client.ts -diff -merge
open-api/typescript-sdk/fetch-client.ts linguist-generated=true

.github/.nvmrc vendored (2 changes)

@@ -1 +1 @@
22.19.0
22.14.0


@@ -1,4 +0,0 @@
# Ignore files for PNPM, NPM and YARN
pnpm-lock.yaml
package-lock.json
yarn.lock


@@ -14,6 +14,7 @@ body:
label: I have searched the existing feature requests, both open and closed, to make sure this is not a duplicate request.
options:
- label: 'Yes'
required: true
- type: textarea
id: feature


@@ -6,6 +6,7 @@ body:
label: I have searched the existing issues, both open and closed, to make sure this is not a duplicate report.
options:
- label: 'Yes'
required: true
- type: markdown
attributes:
@@ -64,11 +65,6 @@ body:
- label: Web
- label: Mobile
- type: input
attributes:
label: Device make and model
placeholder: Samsung S25 Android 16
- type: textarea
validations:
required: true

.github/package-lock.json generated vendored Normal file (28 changes)

@@ -0,0 +1,28 @@
{
"name": ".github",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"devDependencies": {
"prettier": "^3.5.3"
}
},
"node_modules/prettier": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz",
"integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==",
"dev": true,
"license": "MIT",
"bin": {
"prettier": "bin/prettier.cjs"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
}
}
}


@@ -34,7 +34,3 @@ The `/api/something` endpoint is now `/api/something-else`
- [ ] I have followed naming conventions/patterns in the surrounding code
- [ ] All code in `src/services/` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services/`)
## Please describe to which degree, if any, an LLM was used in creating this pull request.
...

.github/workflows/build-mobile.yml

@@ -7,15 +7,6 @@ on:
ref:
required: false
type: string
secrets:
KEY_JKS:
required: true
ALIAS:
required: true
ANDROID_KEY_PASSWORD:
required: true
ANDROID_STORE_PASSWORD:
required: true
pull_request:
push:
branches: [main]
@@ -24,87 +15,71 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
pre-job:
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
should_run: ${{ steps.check.outputs.should_run }}
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Check what should run
id: check
uses: immich-app/devtools/actions/pre-job@24820aa4ef67959b0dcf69a438cccf00d7c7042b # pre-job-action-v1.0.1
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
mobile:
- 'mobile/**'
force-filters: |
- '.github/workflows/build-mobile.yml'
force-events: 'workflow_call,workflow_dispatch'
workflow:
- '.github/workflows/build-mobile.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android
needs: pre-job
permissions:
contents: read
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && fromJSON(needs.pre-job.outputs.should_run).mobile == true }}
runs-on: mich
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_run == 'true' }}
runs-on: macos-14
steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Determine ref
id: get-ref
run: |
input_ref="${{ inputs.ref }}"
github_ref="${{ github.sha }}"
ref="${input_ref:-$github_ref}"
echo "ref=$ref" >> $GITHUB_OUTPUT
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
ref: ${{ inputs.ref || github.sha }}
persist-credentials: false
ref: ${{ steps.get-ref.outputs.ref }}
- name: Create the Keystore
env:
KEY_JKS: ${{ secrets.KEY_JKS }}
working-directory: ./mobile
run: printf "%s" $KEY_JKS | base64 -d > android/key.jks
- uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
- uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4
with:
distribution: 'zulu'
java-version: '17'
- name: Restore Gradle Cache
id: cache-gradle-restore
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4
with:
path: |
~/.gradle/caches
~/.gradle/wrapper
~/.android/sdk
mobile/android/.gradle
mobile/.dart_tool
key: build-mobile-gradle-${{ runner.os }}-main
cache: 'gradle'
- name: Setup Flutter SDK
uses: subosito/flutter-action@fd55f4c5af5b953cc57a2be44cb082c8f6635e8e # v2.21.0
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
cache: true
- name: Setup Android SDK
uses: android-actions/setup-android@9fc6c4e9069bf8d3d10b2204b1fb8f6ef7065407 # v3.2.2
with:
packages: ''
- name: Create the Keystore
env:
KEY_JKS: ${{ secrets.KEY_JKS }}
working-directory: ./mobile
run: echo $KEY_JKS | base64 -d > android/key.jks
- name: Get Packages
working-directory: ./mobile
run: flutter pub get
- name: Generate translation file
run: dart run easy_localization:generate -S ../i18n && dart run bin/generate_keys.dart
working-directory: ./mobile
- name: Generate platform APIs
run: make pigeon
run: make translation
working-directory: ./mobile
- name: Build Android App Bundle
@@ -113,30 +88,12 @@ jobs:
ALIAS: ${{ secrets.ALIAS }}
ANDROID_KEY_PASSWORD: ${{ secrets.ANDROID_KEY_PASSWORD }}
ANDROID_STORE_PASSWORD: ${{ secrets.ANDROID_STORE_PASSWORD }}
IS_MAIN: ${{ github.ref == 'refs/heads/main' }}
run: |
if [[ $IS_MAIN == 'true' ]]; then
flutter build apk --release
flutter build apk --release --split-per-abi --target-platform android-arm,android-arm64,android-x64
else
flutter build apk --debug --split-per-abi --target-platform android-arm64
fi
flutter build apk --release
flutter build apk --release --split-per-abi --target-platform android-arm,android-arm64,android-x64
- name: Publish Android Artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: release-apk-signed
path: mobile/build/app/outputs/flutter-apk/*.apk
- name: Save Gradle Cache
id: cache-gradle-save
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4
if: github.ref == 'refs/heads/main'
with:
path: |
~/.gradle/caches
~/.gradle/wrapper
~/.android/sdk
mobile/android/.gradle
mobile/.dart_tool
key: ${{ steps.cache-gradle-restore.outputs.cache-primary-key }}


@@ -8,38 +8,31 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
cleanup:
name: Cleanup
runs-on: ubuntu-latest
permissions:
contents: read
actions: write
steps:
- name: Check out code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Cleanup
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REF: ${{ github.ref }}
run: |
gh extension install actions/gh-actions-cache
REPO=${{ github.repository }}
BRANCH=${{ github.ref }}
echo "Fetching list of cache keys"
cacheKeysForPR=$(gh actions-cache list -R $REPO -B ${REF} -L 100 | cut -f 1 )
cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 )
## Setting this to not fail the workflow while deleting cache keys.
set +e
echo "Deleting caches..."
for cacheKey in $cacheKeysForPR
do
gh actions-cache delete $cacheKey -R "$REPO" -B "${REF}" --confirm
gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
done
echo "Done"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
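For reference, the same cleanup can be driven by hand with the extension this job installs (repository, branch, and key values are illustrative):

gh extension install actions/gh-actions-cache
gh actions-cache list -R immich-app/immich -B refs/heads/main -L 100
gh actions-cache delete "<cache-key>" -R immich-app/immich -B refs/heads/main --confirm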


@@ -16,41 +16,31 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
permissions:
packages: write
jobs:
publish:
name: CLI Publish
runs-on: ubuntu-latest
permissions:
contents: read
defaults:
run:
working-directory: ./cli
steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
- name: Setup Node
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version-file: './cli/.nvmrc'
registry-url: 'https://registry.npmjs.org'
cache: 'pnpm'
cache-dependency-path: '**/pnpm-lock.yaml'
- name: Setup typescript-sdk
run: pnpm install && pnpm run build
working-directory: ./open-api/typescript-sdk
- run: pnpm install --frozen-lockfile
- run: pnpm build
- run: pnpm publish
- name: Prepare SDK
run: npm ci --prefix ../open-api/typescript-sdk/
- name: Build SDK
run: npm run build --prefix ../open-api/typescript-sdk/
- run: npm ci
- run: npm run build
- run: npm publish
if: ${{ github.event_name == 'release' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
@@ -58,25 +48,20 @@ jobs:
docker:
name: Docker
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
needs: publish
steps:
- name: Checkout
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
@@ -91,7 +76,7 @@ jobs:
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
flavor: |
latest=false
@@ -102,7 +87,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64


@@ -1,107 +0,0 @@
on:
issues:
types: [opened]
discussion:
types: [created]
name: Close likely duplicates
permissions: {}
jobs:
should_run:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.should_run.outputs.run }}
steps:
- id: should_run
run: echo "run=${{ github.event_name == 'issues' || github.event.discussion.category.name == 'Feature Request' }}" >> $GITHUB_OUTPUT
get_body:
runs-on: ubuntu-latest
needs: should_run
if: ${{ needs.should_run.outputs.should_run == 'true' }}
env:
EVENT: ${{ toJSON(github.event) }}
outputs:
body: ${{ steps.get_body.outputs.body }}
steps:
- id: get_body
run: |
BODY=$(echo """$EVENT""" | jq -r '.issue // .discussion | .body' | base64 -w 0)
echo "body=$BODY" >> $GITHUB_OUTPUT
get_checkbox_json:
runs-on: ubuntu-latest
needs: [get_body, should_run]
if: ${{ needs.should_run.outputs.should_run == 'true' }}
container:
image: ghcr.io/immich-app/mdq:main@sha256:d8ae47cf2e6cf4e2559bd57a60b73674fe44f897cba2c2bddff2987a05be10a4
outputs:
checked: ${{ steps.get_checkbox.outputs.checked }}
steps:
- id: get_checkbox
env:
BODY: ${{ needs.get_body.outputs.body }}
run: |
CHECKED=$(echo "$BODY" | base64 -d | /mdq --output json '# I have searched | - [?] Yes' | jq '.items[0].list[0].checked // false')
echo "checked=$CHECKED" >> $GITHUB_OUTPUT
close_and_comment:
runs-on: ubuntu-latest
needs: [get_checkbox_json, should_run]
if: ${{ needs.should_run.outputs.should_run == 'true' && needs.get_checkbox_json.outputs.checked != 'true' }}
permissions:
issues: write
discussions: write
steps:
- name: Close issue
if: ${{ github.event_name == 'issues' }}
env:
GH_TOKEN: ${{ github.token }}
NODE_ID: ${{ github.event.issue.node_id }}
run: |
gh api graphql \
-f issueId="$NODE_ID" \
-f body="This issue has automatically been closed as it is likely a duplicate. We get a lot of duplicate threads each day, which is why we ask you in the template to confirm that you searched for duplicates before opening one. If you're sure this is not a duplicate, please leave a comment and we will reopen the thread if necessary." \
-f query='
mutation CommentAndCloseIssue($issueId: ID!, $body: String!) {
addComment(input: {
subjectId: $issueId,
body: $body
}) {
__typename
}
closeIssue(input: {
issueId: $issueId,
stateReason: DUPLICATE
}) {
__typename
}
}'
- name: Close discussion
if: ${{ github.event_name == 'discussion' && github.event.discussion.category.name == 'Feature Request' }}
env:
GH_TOKEN: ${{ github.token }}
NODE_ID: ${{ github.event.discussion.node_id }}
run: |
gh api graphql \
-f discussionId="$NODE_ID" \
-f body="This discussion has automatically been closed as it is likely a duplicate. We get a lot of duplicate threads each day, which is why we ask you in the template to confirm that you searched for duplicates before opening one. If you're sure this is not a duplicate, please leave a comment and we will reopen the thread if necessary." \
-f query='
mutation CommentAndCloseDiscussion($discussionId: ID!, $body: String!) {
addDiscussionComment(input: {
discussionId: $discussionId,
body: $body
}) {
__typename
}
closeDiscussion(input: {
discussionId: $discussionId,
reason: DUPLICATE
}) {
__typename
}
}'


@@ -24,8 +24,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
analyze:
name: Analyze
@@ -44,13 +42,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -63,7 +59,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
uses: github/codeql-action/autobuild@45775bd8235c68ba998cffa5171334d58593da47 # v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -76,6 +72,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3
uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3
with:
category: '/language:${{matrix.language}}'

.github/workflows/docker.yml

@@ -12,19 +12,20 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
permissions:
packages: write
jobs:
pre-job:
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
should_run: ${{ steps.check.outputs.should_run }}
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Check what should run
id: check
uses: immich-app/devtools/actions/pre-job@24820aa4ef67959b0dcf69a438cccf00d7c7042b # pre-job-action-v1.0.1
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
server:
@@ -34,153 +35,485 @@ jobs:
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
force-filters: |
- '.github/workflows/docker.yml'
- '.github/workflows/multi-runner-build.yml'
- '.github/actions/image-build'
force-events: 'workflow_dispatch,release'
workflow:
- '.github/workflows/docker.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
needs: pre-job
permissions:
contents: read
packages: write
if: ${{ fromJSON(needs.pre-job.outputs.should_run).machine-learning == false && !github.event.pull_request.head.repo.fork }}
if: ${{ needs.pre-job.outputs.should_run_ml == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ['', '-cuda', '-rocm', '-openvino', '-armnn', '-rknn']
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
env:
REGISTRY_NAME: 'ghcr.io'
REPOSITORY: ${{ github.repository_owner }}/immich-machine-learning
TAG_OLD: main${{ matrix.suffix }}
TAG_PR: ${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT: commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
run: |
docker buildx imagetools create -t "${REGISTRY_NAME}/${REPOSITORY}:${TAG_PR}" "${REGISTRY_NAME}/${REPOSITORY}:${TAG_OLD}"
docker buildx imagetools create -t "${REGISTRY_NAME}/${REPOSITORY}:${TAG_COMMIT}" "${REGISTRY_NAME}/${REPOSITORY}:${TAG_OLD}"
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
needs: pre-job
permissions:
contents: read
packages: write
if: ${{ fromJSON(needs.pre-job.outputs.should_run).server == false && !github.event.pull_request.head.repo.fork }}
if: ${{ needs.pre-job.outputs.should_run_server == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ['']
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
env:
REGISTRY_NAME: 'ghcr.io'
REPOSITORY: ${{ github.repository_owner }}/immich-server
TAG_OLD: main${{ matrix.suffix }}
TAG_PR: ${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT: commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
run: |
docker buildx imagetools create -t "${REGISTRY_NAME}/${REPOSITORY}:${TAG_PR}" "${REGISTRY_NAME}/${REPOSITORY}:${TAG_OLD}"
docker buildx imagetools create -t "${REGISTRY_NAME}/${REPOSITORY}:${TAG_COMMIT}" "${REGISTRY_NAME}/${REPOSITORY}:${TAG_OLD}"
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
machine-learning:
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ fromJSON(needs.pre-job.outputs.should_run).machine-learning == true }}
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ${{ matrix.runner }}
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
device: cpu
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: cpu
- platform: linux/amd64
runner: ubuntu-latest
device: cuda
suffix: -cuda
- platform: linux/amd64
runner: mich
device: rocm
suffix: -rocm
- platform: linux/amd64
runner: ubuntu-latest
device: openvino
suffix: -openvino
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: armnn
suffix: -armnn
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: rknn
suffix: -rknn
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate cache target
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
fi
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
- name: Build and push image
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
labels: ${{ steps.meta.outputs.labels }}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_ml:
name: Merge & Push ML
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
DOCKER_REPO: altran1502/immich-machine-learning
strategy:
matrix:
include:
- device: cpu
- device: cuda
suffix: -cuda
- device: rocm
suffix: -rocm
- device: openvino
suffix: -openvino
- device: armnn
suffix: -armnn
- device: rknn
suffix: -rknn
needs:
- build_and_push_ml
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
with:
path: ${{ runner.temp }}/digests
pattern: ml-digests-${{ matrix.device }}-*
merge-multiple: true
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag
latest=false
suffix=${{ matrix.suffix }}
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch
# Tag with pr-number
type=ref,event=pr
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-
# Tag with git tag on release
type=ref,event=tag
type=raw,value=release,enable=${{ github.event_name == 'release' }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
# Process annotations
declare -a ANNOTATIONS=()
if [[ -n "$DOCKER_METADATA_OUTPUT_JSON" ]]; then
while IFS= read -r annotation; do
# Extract key and value by removing the manifest: prefix
if [[ "$annotation" =~ ^manifest:(.+)=(.+)$ ]]; then
key="${BASH_REMATCH[1]}"
value="${BASH_REMATCH[2]}"
# Use array to properly handle arguments with spaces
ANNOTATIONS+=(--annotation "index:$key=$value")
fi
done < <(jq -r '.annotations[]' <<< "$DOCKER_METADATA_OUTPUT_JSON")
fi
TAGS=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
SOURCE_ARGS=$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
echo "docker buildx imagetools create $TAGS "${ANNOTATIONS[@]}" $SOURCE_ARGS"
docker buildx imagetools create $TAGS "${ANNOTATIONS[@]}" $SOURCE_ARGS
build_and_push_server:
name: Build and Push Server
runs-on: ${{ matrix.runner }}
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
strategy:
fail-fast: false
matrix:
include:
- device: cpu
tag-suffix: ''
- device: cuda
tag-suffix: '-cuda'
platforms: linux/amd64
- device: openvino
tag-suffix: '-openvino'
platforms: linux/amd64
- device: armnn
tag-suffix: '-armnn'
platforms: linux/arm64
- device: rknn
tag-suffix: '-rknn'
platforms: linux/arm64
- device: rocm
tag-suffix: '-rocm'
platforms: linux/amd64
runner-mapping: '{"linux/amd64": "mich"}'
uses: immich-app/devtools/.github/workflows/multi-runner-build.yml@129aeda75a450666ce96e8bc8126652e717917a7 # multi-runner-build-workflow-0.1.1
permissions:
contents: read
actions: read
packages: write
secrets:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
with:
image: immich-machine-learning
context: machine-learning
dockerfile: machine-learning/Dockerfile
platforms: ${{ matrix.platforms }}
runner-mapping: ${{ matrix.runner-mapping }}
tag-suffix: ${{ matrix.tag-suffix }}
dockerhub-push: ${{ github.event_name == 'release' }}
build-args: |
DEVICE=${{ matrix.device }}
- platform: linux/amd64
runner: ubuntu-latest
- platform: linux/arm64
runner: ubuntu-24.04-arm
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
server:
name: Build and Push Server
needs: pre-job
if: ${{ fromJSON(needs.pre-job.outputs.should_run).server == true }}
uses: immich-app/devtools/.github/workflows/multi-runner-build.yml@129aeda75a450666ce96e8bc8126652e717917a7 # multi-runner-build-workflow-0.1.1
permissions:
contents: read
actions: read
packages: write
secrets:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
with:
image: immich-server
context: .
dockerfile: server/Dockerfile
dockerhub-push: ${{ github.event_name == 'release' }}
build-args: |
DEVICE=cpu
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate cache target
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
fi
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
- name: Build and push image
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
build-args: |
DEVICE=cpu
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: server-digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_server:
name: Merge & Push Server
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_server == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
DOCKER_REPO: altran1502/immich-server
needs:
- build_and_push_server
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
with:
path: ${{ runner.temp }}/digests
pattern: server-digests-*
merge-multiple: true
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_PR_HEAD_SHA: 'true'
with:
flavor: |
# Disable latest tag
latest=false
suffix=${{ matrix.suffix }}
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch
# Tag with pr-number
type=ref,event=pr
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-
# Tag with git tag on release
type=ref,event=tag
type=raw,value=release,enable=${{ github.event_name == 'release' }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
# Process annotations
declare -a ANNOTATIONS=()
if [[ -n "$DOCKER_METADATA_OUTPUT_JSON" ]]; then
while IFS= read -r annotation; do
# Extract key and value by removing the manifest: prefix
if [[ "$annotation" =~ ^manifest:(.+)=(.+)$ ]]; then
key="${BASH_REMATCH[1]}"
value="${BASH_REMATCH[2]}"
# Use array to properly handle arguments with spaces
ANNOTATIONS+=(--annotation "index:$key=$value")
fi
done < <(jq -r '.annotations[]' <<< "$DOCKER_METADATA_OUTPUT_JSON")
fi
TAGS=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
SOURCE_ARGS=$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
echo "docker buildx imagetools create $TAGS "${ANNOTATIONS[@]}" $SOURCE_ARGS"
docker buildx imagetools create $TAGS "${ANNOTATIONS[@]}" $SOURCE_ARGS
success-check-server:
name: Docker Build & Push Server Success
needs: [server, retag_server]
permissions: {}
needs: [merge_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
- uses: immich-app/devtools/actions/success-check@68f10eb389bb02a3cf9d1156111964c549eb421b # 0.0.4
with:
needs: ${{ toJSON(needs) }}
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
success-check-ml:
name: Docker Build & Push ML Success
needs: [machine-learning, retag_ml]
permissions: {}
needs: [merge_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:
- uses: immich-app/devtools/actions/success-check@68f10eb389bb02a3cf9d1156111964c549eb421b # 0.0.4
with:
needs: ${{ toJSON(needs) }}
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"

.github/workflows/docs-build.yml

@@ -10,36 +10,30 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
pre-job:
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
should_run: ${{ steps.check.outputs.should_run }}
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Check what should run
id: check
uses: immich-app/devtools/actions/pre-job@24820aa4ef67959b0dcf69a438cccf00d7c7042b # pre-job-action-v1.0.1
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
docs:
- 'docs/**'
open-api:
- 'open-api/immich-openapi-specs.json'
force-filters: |
- '.github/workflows/docs-build.yml'
force-events: 'release'
force-branches: 'main'
workflow:
- '.github/workflows/docs-build.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build
needs: pre-job
permissions:
contents: read
if: ${{ fromJSON(needs.pre-job.outputs.should_run).docs == true }}
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@@ -47,33 +41,25 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version-file: './docs/.nvmrc'
cache: 'pnpm'
cache-dependency-path: '**/pnpm-lock.yaml'
- name: Run install
run: pnpm install
- name: Run npm install
run: npm ci
- name: Check formatting
run: pnpm format
run: npm run format
- name: Run build
run: pnpm build
run: npm run build
- name: Upload build output
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:
name: docs-build-output
path: docs/build/
include-hidden-files: true
retention-days: 1

.github/workflows/docs-deploy.yml

@@ -1,6 +1,6 @@
name: Docs deploy
on:
workflow_run: # zizmor: ignore[dangerous-triggers] no attacker inputs are used here
workflow_run:
workflows: ['Docs build']
types:
- completed
@@ -9,9 +9,6 @@ jobs:
checks:
name: Docs Deploy Checks
runs-on: ubuntu-latest
permissions:
actions: read
pull-requests: read
outputs:
parameters: ${{ steps.parameters.outputs.result }}
artifact: ${{ steps.get-artifact.outputs.result }}
@@ -20,7 +17,7 @@ jobs:
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
@@ -38,9 +35,7 @@ jobs:
return { found: true, id: matchArtifact.id };
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
env:
HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
const eventType = context.payload.workflow_run.event;
@@ -62,8 +57,7 @@ jobs:
} else if (eventType == "pull_request") {
let pull_number = context.payload.workflow_run.pull_requests[0]?.number;
if(!pull_number) {
const {HEAD_SHA} = process.env;
const response = await github.rest.search.issuesAndPullRequests({q: `repo:${{ github.repository }} is:pr sha:${HEAD_SHA}`,per_page: 1,})
const response = await github.rest.search.issuesAndPullRequests({q: 'repo:${{ github.repository }} is:pr sha:${{ github.event.workflow_run.head_sha }}',per_page: 1,})
const items = response.data.items
if (items.length < 1) {
throw new Error("No pull request found for the commit")
@@ -101,36 +95,30 @@ jobs:
name: Docs Deploy
runs-on: ubuntu-latest
needs: checks
permissions:
contents: read
actions: read
pull-requests: write
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Load parameters
id: parameters
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
env:
PARAM_JSON: ${{ needs.checks.outputs.parameters }}
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
const parameters = JSON.parse(process.env.PARAM_JSON);
const json = `${{ needs.checks.outputs.parameters }}`;
const parameters = JSON.parse(json);
core.setOutput("event", parameters.event);
core.setOutput("name", parameters.name);
core.setOutput("shouldDeploy", parameters.shouldDeploy);
- run: |
echo "Starting docs deployment for ${{ steps.parameters.outputs.event }} ${{ steps.parameters.outputs.name }}"
- name: Download artifact
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
env:
ARTIFACT_JSON: ${{ needs.checks.outputs.artifact }}
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
let artifact = JSON.parse(process.env.ARTIFACT_JSON);
let artifact = ${{ needs.checks.outputs.artifact }};
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
@@ -150,7 +138,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@aee21a7df999be8b471c2a8564c6cd853cb674e1 # v2.1.8
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@@ -165,7 +153,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@aee21a7df999be8b471c2a8564c6cd853cb674e1 # v2.1.8
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@@ -174,15 +162,12 @@ jobs:
- name: Output Cleaning
id: clean
env:
TG_OUTPUT: ${{ steps.docs-output.outputs.tg_action_output }}
run: |
CLEANED=$(echo "$TG_OUTPUT" | sed 's|%0A|\n|g ; s|%3C|<|g' | jq -c .)
echo "output=$CLEANED" >> $GITHUB_OUTPUT
TG_OUT=$(echo '${{ steps.docs-output.outputs.tg_action_output }}' | sed 's|%0A|\n|g ; s|%3C|<|g' | jq -c .)
echo "output=$TG_OUT" >> $GITHUB_OUTPUT
- name: Publish to Cloudflare Pages
# TODO: Action is deprecated
uses: cloudflare/pages-action@f0a1cd58cd66095dee69bfa18fa5efd1dde93bca # v1.5.0
uses: cloudflare/pages-action@f0a1cd58cd66095dee69bfa18fa5efd1dde93bca # v1
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN_PAGES_UPLOAD }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
@@ -199,7 +184,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@aee21a7df999be8b471c2a8564c6cd853cb674e1 # v2.1.8
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@@ -207,7 +192,7 @@ jobs:
tg_command: 'apply'
- name: Comment
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3.2.0
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
if: ${{ steps.parameters.outputs.event == 'pr' }}
with:
number: ${{ fromJson(needs.checks.outputs.parameters).pr_number }}
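One recurring change in this workflow is worth spelling out: values are passed to `actions/github-script` through `env:` and read back from `process.env`, instead of being interpolated straight into the script body. A minimal sketch of the two variants, with hypothetical step names and unpinned action refs for brevity:

```yaml
# Risky: the expression is pasted into the script source before it runs,
# so a crafted value can close the string literal and inject JavaScript.
- name: Inline interpolation (sketch)
  uses: actions/github-script@v7
  with:
    script: |
      const sha = '${{ github.event.workflow_run.head_sha }}';

# Safer: the value crosses over as an environment variable and is read
# at runtime, so it stays inert data no matter what it contains.
- name: Env indirection (sketch)
  uses: actions/github-script@v7
  env:
    HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
  with:
    script: |
      const { HEAD_SHA } = process.env;
```

A commit SHA is fairly benign, but the same pattern applies to branch names, PR titles, and other attacker-influenced event fields.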


@@ -1,22 +1,15 @@
name: Docs destroy
on:
pull_request_target: # zizmor: ignore[dangerous-triggers] no attacker inputs are used here
pull_request_target:
types: [closed]
permissions: {}
jobs:
deploy:
name: Docs Destroy
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Destroy Docs Subdomain
env:
@@ -25,7 +18,7 @@ jobs:
CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
uses: gruntwork-io/terragrunt-action@aee21a7df999be8b471c2a8564c6cd853cb674e1 # v2.1.8
uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
with:
tg_version: '0.58.12'
tofu_version: '1.7.1'
@@ -33,7 +26,7 @@ jobs:
tg_command: 'destroy -refresh=false'
- name: Comment
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3.2.0
uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
with:
number: ${{ github.event.number }}
delete: true


@@ -4,51 +4,42 @@ on:
pull_request:
types: [labeled]
permissions: {}
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
contents: write
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
persist-credentials: true
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
- name: Setup Node
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version-file: './server/.nvmrc'
cache: 'pnpm'
cache-dependency-path: '**/pnpm-lock.yaml'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9.1.4
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
if: always()
with:
script: |


@@ -1,112 +0,0 @@
name: Merge translations
on:
workflow_dispatch:
workflow_call:
secrets:
PUSH_O_MATIC_APP_ID:
required: true
PUSH_O_MATIC_APP_KEY:
required: true
WEBLATE_TOKEN:
required: true
permissions: {}
env:
WEBLATE_HOST: 'https://hosted.weblate.org'
WEBLATE_COMPONENT: 'immich/immich'
jobs:
merge:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Find translation PR
id: find_pr
env:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
PR=$(gh pr list --repo $GITHUB_REPOSITORY --author weblate --json number,mergeable)
echo "$PR"
PR_NUMBER=$(echo "$PR" | jq '
if length == 1 then
.[0].number
else
error("Expected exactly 1 entry, got \(length)")
end
' 2>&1) || exit 1
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_OUTPUT
echo "Selected PR $PR_NUMBER"
if ! echo "$PR" | jq -e '.[0].mergeable == "MERGEABLE"'; then
echo "PR is not mergeable"
exit 1
fi
- name: Generate a token
id: generate_token
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Lock weblate
env:
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
run: |
curl --fail-with-body -X POST -H "Authorization: Token $WEBLATE_TOKEN" "$WEBLATE_HOST/api/components/$WEBLATE_COMPONENT/lock/" -d lock=true
- name: Commit translations
env:
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
run: |
curl --fail-with-body -X POST -H "Authorization: Token $WEBLATE_TOKEN" "$WEBLATE_HOST/api/components/$WEBLATE_COMPONENT/repository/" -d operation=commit
curl --fail-with-body -X POST -H "Authorization: Token $WEBLATE_TOKEN" "$WEBLATE_HOST/api/components/$WEBLATE_COMPONENT/repository/" -d operation=push
- name: Merge PR
id: merge_pr
env:
GH_TOKEN: ${{ steps.generate_token.outputs.token }}
PR_NUMBER: ${{ steps.find_pr.outputs.PR_NUMBER }}
run: |
set -euo pipefail
REVIEW_ID=$(gh api -X POST "repos/$GITHUB_REPOSITORY/pulls/$PR_NUMBER/reviews" --field event='APPROVE' --field body='Automatically merging translations PR' \
| jq '.id')
echo "REVIEW_ID=$REVIEW_ID" >> $GITHUB_OUTPUT
gh pr merge "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --auto --squash
- name: Wait for PR to merge
env:
GH_TOKEN: ${{ steps.generate_token.outputs.token }}
PR_NUMBER: ${{ steps.find_pr.outputs.PR_NUMBER }}
REVIEW_ID: ${{ steps.merge_pr.outputs.REVIEW_ID }}
run: |
# So we clean up no matter what
set +e
for i in {1..100}; do
if gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json state | jq -e '.state == "MERGED"'; then
echo "PR merged"
exit 0
else
echo "PR not merged yet, waiting..."
sleep 6
fi
done
echo "PR did not merge in time"
gh api -X PUT "repos/$GITHUB_REPOSITORY/pulls/$PR_NUMBER/reviews/$REVIEW_ID/dismissals" --field message='Merge attempt timed out' --field event='DISMISS'
gh pr merge "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --disable-auto
exit 1
- name: Unlock weblate
env:
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
run: |
curl --fail-with-body -X POST -H "Authorization: Token $WEBLATE_TOKEN" "$WEBLATE_HOST/api/components/$WEBLATE_COMPONENT/lock/" -d lock=false
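The approve-and-merge steps in this workflow run with an installation token minted from a GitHub App rather than the default `GITHUB_TOKEN`, since events created with `GITHUB_TOKEN` do not trigger downstream workflows. A sketch of the shape, using the secret names from the workflow above (the merge command assumes `PR_NUMBER` is supplied elsewhere):

```yaml
- name: Generate a token
  id: generate-token
  uses: actions/create-github-app-token@v2
  with:
    app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
    private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Act as the app
  env:
    GH_TOKEN: ${{ steps.generate-token.outputs.token }}
  run: gh pr merge "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --auto --squash
```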


@@ -1,12 +0,0 @@
name: PR Conventional Commit
on:
pull_request:
types: [opened, synchronize, reopened, edited]
jobs:
validate-pr-title:
name: Validate PR Title (conventional commit)
uses: immich-app/devtools/.github/workflows/shared-pr-require-conventional-commit.yml@main
permissions:
pull-requests: write


@@ -1,15 +0,0 @@
name: Zizmor
on:
pull_request:
push:
branches: [main]
jobs:
zizmor:
name: Zizmor
uses: immich-app/devtools/.github/workflows/shared-zizmor.yml@main
permissions:
actions: read
contents: read
security-events: write


@@ -1,11 +1,9 @@
name: PR Label Validation
on:
pull_request_target: # zizmor: ignore[dangerous-triggers] no attacker inputs are used here
pull_request_target:
types: [opened, labeled, unlabeled, synchronize]
permissions: {}
jobs:
validate-release-label:
runs-on: ubuntu-latest
@@ -14,7 +12,7 @@ jobs:
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5.1
uses: mheap/github-action-required-labels@388fd6af37b34cdfe5a23b37060e763217e58b03 # v5
with:
mode: exactly
count: 1


@@ -1,8 +1,6 @@
name: 'Pull Request Labeler'
on:
- pull_request_target # zizmor: ignore[dangerous-triggers] no attacker inputs are used here
permissions: {}
- pull_request_target
jobs:
labeler:
@@ -11,4 +9,4 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0
- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
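The labeler job itself is configuration-driven: it reads a repository file that maps changed paths to labels. A hypothetical `.github/labeler.yml` in the action's v5 syntax (label names and globs are illustrative, not taken from this repo):

```yaml
documentation:
  - changed-files:
      - any-glob-to-any-file: 'docs/**'
web:
  - changed-files:
      - any-glob-to-any-file: 'web/**'
```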


@@ -0,0 +1,15 @@
name: PR Conventional Commit Validation
on:
pull_request:
types: [opened, synchronize, reopened, edited]
jobs:
validate-pr-title:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@b628c5a234cc32513014b7bfdd1e47b532124d98 # 1.3.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'


@@ -21,59 +21,35 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-root
cancel-in-progress: true
permissions: {}
jobs:
merge_translations:
uses: ./.github/workflows/merge-translations.yml
permissions:
pull-requests: write
secrets:
PUSH_O_MATIC_APP_ID: ${{ secrets.PUSH_O_MATIC_APP_ID }}
PUSH_O_MATIC_APP_KEY: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
bump_version:
runs-on: ubuntu-latest
outputs:
ref: ${{ steps.push-tag.outputs.commit_long_sha }}
permissions: {} # No job-level permissions are needed because it uses the app-token
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
token: ${{ steps.generate-token.outputs.token }}
persist-credentials: true
- name: Install uv
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
- name: Setup Node
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version-file: './server/.nvmrc'
cache: 'pnpm'
cache-dependency-path: '**/pnpm-lock.yaml'
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5
- name: Bump version
env:
SERVER_BUMP: ${{ inputs.serverBump }}
MOBILE_BUMP: ${{ inputs.mobileBump }}
run: misc/release/pump-version.sh -s "${SERVER_BUMP}" -m "${MOBILE_BUMP}"
run: misc/release/pump-version.sh -s "${{ inputs.serverBump }}" -m "${{ inputs.mobileBump }}"
- name: Commit and tag
id: push-tag
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9.1.4
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
with:
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
@@ -83,47 +59,37 @@ jobs:
build_mobile:
uses: ./.github/workflows/build-mobile.yml
needs: bump_version
permissions:
contents: read
secrets:
KEY_JKS: ${{ secrets.KEY_JKS }}
ALIAS: ${{ secrets.ALIAS }}
ANDROID_KEY_PASSWORD: ${{ secrets.ANDROID_KEY_PASSWORD }}
ANDROID_STORE_PASSWORD: ${{ secrets.ANDROID_STORE_PASSWORD }}
secrets: inherit
with:
ref: ${{ needs.bump_version.outputs.ref }}
prepare_release:
runs-on: ubuntu-latest
needs: build_mobile
permissions:
actions: read # To download the app artifact
# No content permissions are needed because it uses the app-token
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
token: ${{ steps.generate-token.outputs.token }}
persist-credentials: false
- name: Download APK
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
with:
name: release-apk-signed
- name: Create draft release
uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
with:
draft: true
tag_name: ${{ env.IMMICH_VERSION }}
token: ${{ steps.generate-token.outputs.token }}
generate_release_notes: true
body_path: misc/release/notes.tmpl
files: |

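The `build_mobile` call site in this workflow shows the two ways a reusable workflow can receive secrets: an explicit allow-list, or `secrets: inherit`, which forwards every secret the caller has. A side-by-side sketch using the names from the diff above (one secret shown for brevity):

```yaml
build_mobile:
  uses: ./.github/workflows/build-mobile.yml
  secrets:                 # explicit: only the named secrets cross over
    KEY_JKS: ${{ secrets.KEY_JKS }}

build_mobile_alt:
  uses: ./.github/workflows/build-mobile.yml
  secrets: inherit         # implicit: the callee sees all caller secrets
```

The explicit form is more work to maintain but makes the callee's blast radius obvious at the call site.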

@@ -4,8 +4,6 @@ on:
pull_request:
types: [labeled, closed]
permissions: {}
jobs:
comment-status:
runs-on: ubuntu-latest
@@ -13,18 +11,18 @@ jobs:
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2.8.2
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2
with:
message-id: 'preview-status'
message: 'Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/'
remove-label:
runs-on: ubuntu-latest
if: ${{ (github.event.action == 'closed' || github.event.pull_request.head.repo.fork) && contains(github.event.pull_request.labels.*.name, 'preview') }}
if: ${{ github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'preview') }}
permissions:
pull-requests: write
steps:
- uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
github.rest.issues.removeLabel({
@@ -33,15 +31,3 @@ jobs:
repo: context.repo.repo,
name: 'preview'
})
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2.8.2
if: ${{ github.event.pull_request.head.repo.fork }}
with:
message-id: 'preview-status'
message: 'PRs from forks cannot have preview environments.'
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2.8.2
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
message-id: 'preview-status'
message: 'Preview environment has been removed.'


@@ -4,37 +4,28 @@ on:
release:
types: [published]
permissions: {}
permissions:
packages: write
jobs:
publish:
name: Publish `@immich/sdk`
runs-on: ubuntu-latest
permissions:
contents: read
defaults:
run:
working-directory: ./open-api/typescript-sdk
steps:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
- name: Setup pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
with:
node-version-file: './open-api/typescript-sdk/.nvmrc'
registry-url: 'https://registry.npmjs.org'
cache: 'pnpm'
cache-dependency-path: '**/pnpm-lock.yaml'
- name: Install deps
run: pnpm install --frozen-lockfile
run: npm ci
- name: Build
run: pnpm build
run: npm run build
- name: Publish
run: pnpm publish
run: npm publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
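For the publish step, setting `registry-url` makes `setup-node` write an `.npmrc` whose auth entry is a placeholder resolved from `NODE_AUTH_TOKEN` at publish time, so the token itself never lands in the file. A minimal sketch with an unpinned action ref and the secret name used above:

```yaml
- uses: actions/setup-node@v4
  with:
    registry-url: 'https://registry.npmjs.org'  # writes .npmrc with a token placeholder
- run: npm publish
  env:
    NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}   # read by npm when publishing
```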


@@ -9,70 +9,57 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
pre-job:
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
should_run: ${{ steps.check.outputs.should_run }}
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Check what should run
id: check
uses: immich-app/devtools/actions/pre-job@24820aa4ef67959b0dcf69a438cccf00d7c7042b # pre-job-action-v1.0.1
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
mobile:
- 'mobile/**'
force-filters: |
- '.github/workflows/static_analysis.yml'
force-events: 'workflow_dispatch,release'
workflow:
- '.github/workflows/static_analysis.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis
needs: pre-job
if: ${{ fromJSON(needs.pre-job.outputs.should_run).mobile == true }}
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
permissions:
contents: read
defaults:
run:
working-directory: ./mobile
steps:
- name: Checkout code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
persist-credentials: false
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Flutter SDK
uses: subosito/flutter-action@fd55f4c5af5b953cc57a2be44cb082c8f6635e8e # v2.21.0
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
with:
channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml
- name: Install dependencies
run: dart pub get
- name: Install DCM
uses: CQLabs/setup-dcm@8697ae0790c0852e964a6ef1d768d62a6675481a # v2.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
version: auto
working-directory: ./mobile
working-directory: ./mobile
- name: Generate translation file
run: dart run easy_localization:generate -S ../i18n && dart run bin/generate_keys.dart
run: make translation; dart format lib/generated/codegen_loader.g.dart
working-directory: ./mobile
- name: Run Build Runner
run: make build
- name: Generate platform API
run: make pigeon
working-directory: ./mobile
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20.0.4
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
id: verify-changed-files
with:
files: |
@@ -82,23 +69,19 @@ jobs:
- name: Verify files have not changed
if: steps.verify-changed-files.outputs.files_changed == 'true'
env:
CHANGED_FILES: ${{ steps.verify-changed-files.outputs.changed_files }}
run: |
echo "ERROR: Generated files not up to date! Run 'make build' and 'make pigeon' inside the mobile directory"
echo "Changed files: ${CHANGED_FILES}"
echo "ERROR: Generated files not up to date! Run make_build inside the mobile directory"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
exit 1
- name: Run dart analyze
run: dart analyze --fatal-infos
working-directory: ./mobile
- name: Run dart format
run: make format
run: dart format lib/ --set-exit-if-changed
working-directory: ./mobile
# TODO: Re-enable after upgrading custom_lint
# - name: Run dart custom_lint
# run: dart run custom_lint
# TODO: Use https://github.com/CQLabs/dcm-action
- name: Run DCM
run: dcm analyze lib --fatal-style --fatal-warnings
- name: Run dart custom_lint
run: dart run custom_lint
working-directory: ./mobile
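The verify-changed-files steps above are a generic "codegen drift" gate: regenerate everything, then fail the job if the tree no longer matches what was committed. The same idea distilled with plain git, as a sketch (the make targets mirror the ones used above; note this variant only catches changes to already-tracked files):

```yaml
- name: Regenerate code
  working-directory: ./mobile
  run: make build && make pigeon
- name: Fail if generated files are stale
  working-directory: ./mobile
  run: git diff --exit-code   # exits non-zero when tracked files changed
```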

(one file's diff suppressed: too large to display)


@@ -3,59 +3,55 @@ name: Weblate checks
on:
pull_request:
branches: [main]
types:
- opened
- synchronize
- ready_for_review
- auto_merge_enabled
- auto_merge_disabled
permissions: {}
env:
BOT_NAME: immich-push-o-matic
jobs:
pre-job:
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
should_run: ${{ steps.check.outputs.should_run }}
should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
steps:
- name: Check what should run
id: check
uses: immich-app/devtools/actions/pre-job@24820aa4ef67959b0dcf69a438cccf00d7c7042b # pre-job-action-v1.0.1
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
i18n:
- 'i18n/!(en)**\.json'
exclude-branches: 'chore/translations'
skip-force-logic: 'true'
- name: Debug
run: |
echo "Should run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}"
echo "Found i18n paths: ${{ steps.found_paths.outputs.i18n }}"
echo "Head ref: ${{ github.head_ref }}"
enforce-lock:
name: Check Weblate Lock
needs: [pre-job]
runs-on: ubuntu-latest
permissions: {}
if: ${{ fromJSON(needs.pre-job.outputs.should_run).i18n == true }}
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
steps:
- name: Bot review status
env:
PR_NUMBER: ${{ github.event.pull_request.number || github.event.pull_request_review.pull_request.number }}
GH_TOKEN: ${{ github.token }}
- name: Check weblate lock
run: |
# Then check for APPROVED by the bot, if absent fail
gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json reviews | jq -e '.reviews | map(select(.author.login == env.BOT_NAME and .state == "APPROVED")) | length > 0' \
|| (echo "The push-o-matic bot has not approved this PR yet" && exit 1)
if [[ "false" = $(curl https://hosted.weblate.org/api/components/immich/immich/lock/ | jq .locked) ]]; then
exit 1
fi
- name: Find Pull Request
uses: juliangruber/find-pull-request-action@48b6133aa6c826f267ebd33aa2d29470f9d9e7d0 # v1
id: find-pr
with:
branch: chore/translations
- name: Fail if existing weblate PR
if: ${{ steps.find-pr.outputs.number }}
run: exit 1
success-check-lock:
name: Weblate Lock Check Success
needs: [enforce-lock]
runs-on: ubuntu-latest
permissions: {}
if: always()
steps:
- uses: immich-app/devtools/actions/success-check@68f10eb389bb02a3cf9d1156111964c549eb421b # 0.0.4
with:
needs: ${{ toJSON(needs) }}
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
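Both variants of `success-check-lock` implement the same branch-protection trick: a single job that always runs, which required-status rules can point at, and which fails only when a dependency actually failed (skipped dependencies count as success). Distilled to its core:

```yaml
success-check:
  needs: [enforce-lock]
  runs-on: ubuntu-latest
  if: always()                 # run even when dependencies are skipped
  steps:
    - if: ${{ contains(needs.*.result, 'failure') }}
      run: exit 1              # fail iff any upstream job failed
```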

.gitignore

@@ -3,7 +3,6 @@
.DS_Store
.vscode/*
!.vscode/launch.json
!.vscode/extensions.json
.idea
docker/upload
@@ -18,13 +17,9 @@ mobile/libisar.dylib
mobile/openapi/test
mobile/openapi/doc
mobile/openapi/.openapi-generator/FILES
mobile/ios/build
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*
.pnpm-store
.devcontainer/library
.devcontainer/.env*


@@ -1,18 +0,0 @@
module.exports = {
hooks: {
readPackage: (pkg) => {
if (!pkg.name) {
return pkg;
}
if (pkg.name === "exiftool-vendored") {
if (pkg.optionalDependencies["exiftool-vendored.pl"]) {
// make exiftool-vendored.pl a regular dependency
pkg.dependencies["exiftool-vendored.pl"] =
pkg.optionalDependencies["exiftool-vendored.pl"];
delete pkg.optionalDependencies["exiftool-vendored.pl"];
}
}
return pkg;
},
},
};


@@ -1,10 +0,0 @@
{
"recommendations": [
"esbenp.prettier-vscode",
"svelte.svelte-vscode",
"dbaeumer.vscode-eslint",
"dart-code.flutter",
"dart-code.dart-code",
"dcmdev.dcm-vscode-extension"
]
}

.vscode/launch.json

@@ -7,7 +7,7 @@
"restart": true,
"port": 9231,
"name": "Immich API Server",
"remoteRoot": "/usr/src/app/server",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
},
{
@@ -16,7 +16,7 @@
"restart": true,
"port": 9230,
"name": "Immich Workers",
"remoteRoot": "/usr/src/app/server",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
}
]

.vscode/settings.json

@@ -1,64 +1,45 @@
{
"editor.formatOnSave": true,
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.tabSize": 2,
"editor.formatOnSave": true
},
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.tabSize": 2,
"editor.formatOnSave": true
},
"[css]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2,
"editor.formatOnSave": true
},
"[svelte]": {
"editor.defaultFormatter": "svelte.svelte-vscode",
"editor.tabSize": 2
},
"svelte.enable-ts-plugin": true,
"eslint.validate": [
"javascript",
"svelte"
],
"typescript.preferences.importModuleSpecifier": "non-relative",
"[dart]": {
"editor.defaultFormatter": "Dart-Code.dart-code",
"editor.formatOnSave": true,
"editor.selectionHighlight": false,
"editor.suggest.snippetsPreventQuickSuggestions": false,
"editor.suggestSelection": "first",
"editor.tabCompletion": "onlySnippets",
"editor.wordBasedSuggestions": "off"
"editor.wordBasedSuggestions": "off",
"editor.defaultFormatter": "Dart-Code.dart-code"
},
"[javascript]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[svelte]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "svelte.svelte-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[typescript]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"cSpell.words": ["immich"],
"editor.formatOnSave": true,
"eslint.validate": ["javascript", "svelte"],
"cSpell.words": [
"immich"
],
"explorer.fileNesting.enabled": true,
"explorer.fileNesting.patterns": {
"*.dart": "${capture}.g.dart,${capture}.gr.dart,${capture}.drift.dart",
"*.ts": "${capture}.spec.ts,${capture}.mock.ts",
"package.json": "package-lock.json, yarn.lock, pnpm-lock.yaml, bun.lockb, bun.lock, pnpm-workspace.yaml, .pnpmfile.cjs"
},
"svelte.enable-ts-plugin": true,
"typescript.preferences.importModuleSpecifier": "non-relative"
}
"*.dart": "${capture}.g.dart,${capture}.gr.dart,${capture}.drift.dart"
}
}

.vscode/tasks.json

@@ -1,72 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "Fix Permissions, Install Dependencies",
"type": "shell",
"command": "[ -f /immich-devcontainer/container-start.sh ] && /immich-devcontainer/container-start.sh || exit 0",
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "dedicated",
"showReuseMessage": true,
"clear": false,
"group": "Devcontainer tasks",
"close": true
},
"runOptions": {
"runOn": "default"
},
"problemMatcher": []
},
{
"label": "Immich API Server (Nest)",
"dependsOn": ["Fix Permissions, Install Dependencies"],
"type": "shell",
"command": "[ -f /immich-devcontainer/container-start-backend.sh ] && /immich-devcontainer/container-start-backend.sh || exit 0",
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "dedicated",
"showReuseMessage": true,
"clear": false,
"group": "Devcontainer tasks",
"close": true
},
"runOptions": {
"runOn": "default"
},
"problemMatcher": []
},
{
"label": "Immich Web Server (Vite)",
"dependsOn": ["Fix Permissions, Install Dependencies"],
"type": "shell",
"command": "[ -f /immich-devcontainer/container-start-frontend.sh ] && /immich-devcontainer/container-start-frontend.sh || exit 0",
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "dedicated",
"showReuseMessage": true,
"clear": false,
"group": "Devcontainer tasks",
"close": true
},
"runOptions": {
"runOn": "default"
},
"problemMatcher": []
},
{
"label": "Immich Server and Web",
"dependsOn": ["Immich Web Server (Vite)", "Immich API Server (Nest)"],
"runOptions": {
"runOn": "folderOpen"
},
"problemMatcher": []
}
]
}


@@ -1,7 +1,5 @@
/.github/ @bo0tzz
/docker/ @bo0tzz
/server/ @danieldietzler
/web/ @danieldietzler
/machine-learning/ @mertalev
/e2e/ @danieldietzler
/mobile/ @shenlong-tanwen

Makefile

@@ -1,36 +1,24 @@
dev:
@trap 'make dev-down' EXIT; COMPOSE_BAKE=true docker compose -f ./docker/docker-compose.dev.yml up --remove-orphans
docker compose -f ./docker/docker-compose.dev.yml up --remove-orphans || make dev-down
dev-down:
docker compose -f ./docker/docker-compose.dev.yml down --remove-orphans
dev-update:
@trap 'make dev-down' EXIT; COMPOSE_BAKE=true docker compose -f ./docker/docker-compose.dev.yml up --build -V --remove-orphans
docker compose -f ./docker/docker-compose.dev.yml up --build -V --remove-orphans
dev-scale:
@trap 'make dev-down' EXIT; COMPOSE_BAKE=true docker compose -f ./docker/docker-compose.dev.yml up --build -V --scale immich-server=3 --remove-orphans
dev-docs:
npm --prefix docs run start
docker compose -f ./docker/docker-compose.dev.yml up --build -V --scale immich-server=3 --remove-orphans
.PHONY: e2e
e2e:
@trap 'make e2e-down' EXIT; COMPOSE_BAKE=true docker compose -f ./e2e/docker-compose.yml up --remove-orphans
e2e-update:
@trap 'make e2e-down' EXIT; COMPOSE_BAKE=true docker compose -f ./e2e/docker-compose.yml up --build -V --remove-orphans
e2e-down:
docker compose -f ./e2e/docker-compose.yml down --remove-orphans
docker compose -f ./e2e/docker-compose.yml up --build -V --remove-orphans
prod:
@trap 'make prod-down' EXIT; COMPOSE_BAKE=true docker compose -f ./docker/docker-compose.prod.yml up --build -V --remove-orphans
prod-down:
docker compose -f ./docker/docker-compose.prod.yml down --remove-orphans
docker compose -f ./docker/docker-compose.prod.yml up --build -V --remove-orphans
prod-scale:
@trap 'make prod-down' EXIT; COMPOSE_BAKE=true docker compose -f ./docker/docker-compose.prod.yml up --build -V --scale immich-server=3 --scale immich-microservices=3 --remove-orphans
docker compose -f ./docker/docker-compose.prod.yml up --build -V --scale immich-server=3 --scale immich-microservices=3 --remove-orphans
.PHONY: open-api
open-api:
@@ -43,7 +31,7 @@ open-api-typescript:
cd ./open-api && bash ./bin/generate-open-api.sh typescript
sql:
pnpm --filter immich run sync:sql
npm --prefix server run sync:sql
attach-server:
docker exec -it docker_immich-server_1 sh
@@ -51,59 +39,31 @@ attach-server:
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
# Directories that need to be created for volumes or build output
VOLUME_DIRS = \
./.pnpm-store \
./web/.svelte-kit \
./web/node_modules \
./web/coverage \
./e2e/node_modules \
./docs/node_modules \
./server/node_modules \
./open-api/typescript-sdk/node_modules \
./.github/node_modules \
./node_modules \
./cli/node_modules
# Include .env file if it exists
-include docker/.env
MODULES = e2e server web cli sdk docs .github
# directory to package name mapping function
# cli = @immich/cli
# docs = documentation
# e2e = immich-e2e
# open-api/typescript-sdk = @immich/sdk
# server = immich
# web = immich-web
map-package = $(subst sdk,@immich/sdk,$(subst cli,@immich/cli,$(subst docs,documentation,$(subst e2e,immich-e2e,$(subst server,immich,$(subst web,immich-web,$1))))))
audit-%:
pnpm --filter $(call map-package,$*) audit fix
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
install-%:
pnpm --filter $(call map-package,$*) install $(if $(FROZEN),--frozen-lockfile) $(if $(OFFLINE),--offline)
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) i
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
pnpm --filter $(call map-package,$*) run build
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build
format-%:
pnpm --filter $(call map-package,$*) run format:fix
npm --prefix $* run format:fix
lint-%:
pnpm --filter $(call map-package,$*) run lint:fix
lint-web:
pnpm --filter $(call map-package,$*) run lint:p
npm --prefix $* run lint:fix
check-%:
pnpm --filter $(call map-package,$*) run check
npm --prefix $* run check
check-web:
pnpm --filter immich-web run check:typescript
pnpm --filter immich-web run check:svelte
npm --prefix web run check:typescript
npm --prefix web run check:svelte
test-%:
pnpm --filter $(call map-package,$*) run test
npm --prefix $* run test
test-e2e:
docker compose -f ./e2e/docker-compose.yml build
pnpm --filter immich-e2e run test
pnpm --filter immich-e2e run test:web
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
@@ -113,39 +73,23 @@ test-medium:
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "pnpm test:medium -- --run"
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "pnpm run test:medium"
docker exec -it immich_server /bin/sh -c "npm run test:medium"
install-all:
pnpm -r --filter '!documentation' install
build-all: $(foreach M,$(filter-out e2e docs .github,$(MODULES)),build-$M) ;
check-all:
pnpm -r --filter '!documentation' run "/^(check|check\:svelte|check\:typescript)$/"
lint-all:
pnpm -r --filter '!documentation' run lint:fix
format-all:
pnpm -r --filter '!documentation' run format:fix
audit-all:
pnpm -r --filter '!documentation' audit fix
hygiene-all: audit-all
pnpm -r --filter '!documentation' run "/(format:fix|check|check:svelte|check:typescript|sql)/"
test-all:
pnpm -r --filter '!documentation' run "/^test/"
build-all: $(foreach M,$(filter-out e2e .github,$(MODULES)),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs .github,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf {} +
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name "dist" -type d -prune -exec rm -rf '{}' +
find . -name "build" -type d -prune -exec rm -rf '{}' +
find . -name ".svelte-kit" -type d -prune -exec rm -rf '{}' +
find . -name "coverage" -type d -prune -exec rm -rf '{}' +
find . -name ".pnpm-store" -type d -prune -exec rm -rf '{}' +
command -v docker >/dev/null 2>&1 && docker compose -f ./docker/docker-compose.dev.yml down -v --remove-orphans || true
command -v docker >/dev/null 2>&1 && docker compose -f ./e2e/docker-compose.yml down -v --remove-orphans || true
setup-server-dev: install-server
setup-web-dev: install-sdk build-sdk install-web
find . -name "svelte-kit" -type d -prune -exec rm -rf '{}' +
docker compose -f ./docker/docker-compose.dev.yml rm -v -f || true
docker compose -f ./e2e/docker-compose.yml rm -v -f || true


@@ -1 +1 @@
22.19.0
22.14.0


@@ -1,14 +1,19 @@
FROM node:22.16.0-alpine3.20@sha256:2289fb1fba0f4633b08ec47b94a89c7e20b829fc5679f9b7b298eaa2f1ed8b7e AS core
FROM node:22.14.0-alpine3.20@sha256:40be979442621049f40b1d51a26b55e281246b5de4e5f51a18da7beb6e17e3f9 AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./
RUN npm ci
COPY open-api/typescript-sdk/ ./
RUN npm run build
WORKDIR /usr/src/app
COPY package* pnpm* .pnpmfile.cjs ./
COPY ./cli ./cli/
COPY ./open-api/typescript-sdk ./open-api/typescript-sdk/
RUN corepack enable pnpm && \
pnpm install --filter @immich/sdk --filter @immich/cli --frozen-lockfile && \
pnpm --filter @immich/sdk build && \
pnpm --filter @immich/cli build
COPY cli/package.json cli/package-lock.json ./
RUN npm ci
COPY cli .
RUN npm run build
WORKDIR /import
ENTRYPOINT ["node", "/usr/src/app/cli/dist"]
ENTRYPOINT ["node", "/usr/src/app/dist"]


@@ -1,2 +0,0 @@
#!/usr/bin/env node
import '../dist/index.js';

cli/package-lock.json (generated; diff too large to display)

@@ -1,11 +1,11 @@
{
"name": "@immich/cli",
"version": "2.2.90",
"version": "2.2.61",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
"bin": {
"immich": "./bin/immich"
"immich": "dist/index.js"
},
"license": "GNU Affero General Public License version 3",
"keywords": [
@@ -13,6 +13,7 @@
"cli"
],
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@types/byte-size": "^8.1.0",
@@ -20,22 +21,22 @@
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.18.1",
"@types/node": "^22.14.0",
"@vitest/coverage-v8": "^3.0.0",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.1.8",
"eslint-config-prettier": "^10.0.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^60.0.0",
"eslint-plugin-unicorn": "^57.0.0",
"globals": "^16.0.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"typescript-eslint": "^8.28.0",
"vite": "^7.0.0",
"vite": "^6.0.0",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0",
"vitest-fetch-mock": "^0.4.0",
@@ -68,6 +69,6 @@
"micromatch": "^4.0.8"
},
"volta": {
"node": "22.19.0"
"node": "22.14.0"
}
}


@@ -43,7 +43,6 @@ export interface UploadOptionsDto {
concurrency: number;
progress?: boolean;
watch?: boolean;
jsonOutput?: boolean;
}
class UploadFile extends File {
@@ -66,14 +65,8 @@ class UploadFile extends File {
const uploadBatch = async (files: string[], options: UploadOptionsDto) => {
const { newFiles, duplicates } = await checkForDuplicates(files, options);
const newAssets = await uploadFiles(newFiles, options);
if (options.jsonOutput) {
console.log(JSON.stringify({ newFiles, duplicates, newAssets }, undefined, 4));
}
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(
newAssets.map(({ filepath }) => filepath),
options,
);
await deleteFiles(newFiles, options);
};
export const startWatch = async (


@@ -68,11 +68,6 @@ program
.env('IMMICH_UPLOAD_CONCURRENCY')
.default(4),
)
.addOption(
new Option('-j, --json-output', 'Output detailed information in json format')
.env('IMMICH_JSON_OUTPUT')
.default(false),
)
.addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
.addOption(new Option('--no-progress', 'Hide progress bars').env('IMMICH_PROGRESS_BAR').default(true))
.addOption(


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.5"
constraints = "4.52.5"
version = "4.52.0"
constraints = "4.52.0"
hashes = [
"h1:+rfzF+16ZcWZWnTyW/p1HHTzYbPKX8Zt2nIFtR/+f+E=",
"h1:18bXaaOSq8MWKuMxo/4y7EB7/i7G90y5QsKHZRmkoDo=",
"h1:4vZVOpKeEQZsF2VrARRZFeL37Ed/gD4rRMtfnvWQres=",
"h1:BZOsTF83QPKXTAaYqxPKzdl1KRjk/L2qbPpFjM0w28A=",
"h1:CDuC+HXLvc1z6wkCRsSDcc/+QENIHEtssYshiWg3opA=",
"h1:DE+YFzLnqSe79pI2R4idRGx5QzLdrA7RXvngTkGfZ30=",
"h1:DfaJwH3Ml4yrRbdAY4AcDVy0QTQk5T3A622TXzS/u2E=",
"h1:EIDXP0W3kgIv2pecrFmqtK/DnlqkyckzBzhxKaXU+4A=",
"h1:EV4kYyaOnwGA0bh/3hU6Ezqnt1PFDxopH7i85e48IzY=",
"h1:M0iXabfzamU+MPDi0G9XACpbacFKMakmM+Z9HZ8HrsM=",
"h1:YWmCbGF/KbsrUzcYVBLscwLizidbp95TDQa0N2qpmVo=",
"h1:cxPcCB5gbrpUO1+IXkQYs1YTY50/0IlApCzGea0cwuQ=",
"h1:g6DldikTV2HXUu9uoeNY5FuLufgaYWF4ufgZg7wq62s=",
"h1:oi/Hrx9pwoQ+Z52CBC+rrowVH387EIj0qvnxQgDeI+0=",
"zh:1a3400cb38863b2585968d1876706bcfc67a148e1318a1d325c6c7704adc999b",
"zh:4c5062cb9e9da1676f06ae92b8370186d98976cc4c7030d3cd76df12af54282a",
"zh:52110f493b5f0587ef77a1cfd1a67001fd4c617b14c6502d732ab47352bdc2f7",
"zh:5aa536f9eaeb43823aaf2aa80e7d39b25ef2b383405ed034aa16a28b446a9238",
"zh:5cc39459a1c6be8a918f17054e4fbba573825ed5597dcada588fe99614d98a5b",
"zh:629ae6a7ba298815131da826474d199312d21cec53a4d5ded4fa56a692e6f072",
"zh:719cc7c75dc1d3eb30c22ff5102a017996d9788b948078c7e1c5b3446aeca661",
"zh:8698635a3ca04383c1e93b21d6963346bdae54d27177a48e4b1435b7f731731c",
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8a9993f1dcadf1dd6ca43b23348abe374605d29945a2fafc07fb3457644e6a54",
"zh:b1b9a1e6bcc24d5863a664a411d2dc906373ae7a2399d2d65548ce7377057852",
"zh:b270184cdeec277218e84b94cb136fead753da717f9b9dc378e51907f3f00bb0",
"zh:dff2bc10071210181726ce270f954995fe42c696e61e2e8f874021fed02521e5",
"zh:e8e87b40b6a87dc097b0fdc20d3f725cec0d82abc9cc3755c1f89f8f6e8b0036",
"zh:ee964a6573d399a5dd22ce328fb38ca1207797a02248f14b2e4913ee390e7803",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.52.5"
version = "4.52.0"
}
}
}


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.5"
constraints = "4.52.5"
version = "4.52.0"
constraints = "4.52.0"
hashes = [
"h1:+rfzF+16ZcWZWnTyW/p1HHTzYbPKX8Zt2nIFtR/+f+E=",
"h1:18bXaaOSq8MWKuMxo/4y7EB7/i7G90y5QsKHZRmkoDo=",
"h1:4vZVOpKeEQZsF2VrARRZFeL37Ed/gD4rRMtfnvWQres=",
"h1:BZOsTF83QPKXTAaYqxPKzdl1KRjk/L2qbPpFjM0w28A=",
"h1:CDuC+HXLvc1z6wkCRsSDcc/+QENIHEtssYshiWg3opA=",
"h1:DE+YFzLnqSe79pI2R4idRGx5QzLdrA7RXvngTkGfZ30=",
"h1:DfaJwH3Ml4yrRbdAY4AcDVy0QTQk5T3A622TXzS/u2E=",
"h1:EIDXP0W3kgIv2pecrFmqtK/DnlqkyckzBzhxKaXU+4A=",
"h1:EV4kYyaOnwGA0bh/3hU6Ezqnt1PFDxopH7i85e48IzY=",
"h1:M0iXabfzamU+MPDi0G9XACpbacFKMakmM+Z9HZ8HrsM=",
"h1:YWmCbGF/KbsrUzcYVBLscwLizidbp95TDQa0N2qpmVo=",
"h1:cxPcCB5gbrpUO1+IXkQYs1YTY50/0IlApCzGea0cwuQ=",
"h1:g6DldikTV2HXUu9uoeNY5FuLufgaYWF4ufgZg7wq62s=",
"h1:oi/Hrx9pwoQ+Z52CBC+rrowVH387EIj0qvnxQgDeI+0=",
"zh:1a3400cb38863b2585968d1876706bcfc67a148e1318a1d325c6c7704adc999b",
"zh:4c5062cb9e9da1676f06ae92b8370186d98976cc4c7030d3cd76df12af54282a",
"zh:52110f493b5f0587ef77a1cfd1a67001fd4c617b14c6502d732ab47352bdc2f7",
"zh:5aa536f9eaeb43823aaf2aa80e7d39b25ef2b383405ed034aa16a28b446a9238",
"zh:5cc39459a1c6be8a918f17054e4fbba573825ed5597dcada588fe99614d98a5b",
"zh:629ae6a7ba298815131da826474d199312d21cec53a4d5ded4fa56a692e6f072",
"zh:719cc7c75dc1d3eb30c22ff5102a017996d9788b948078c7e1c5b3446aeca661",
"zh:8698635a3ca04383c1e93b21d6963346bdae54d27177a48e4b1435b7f731731c",
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8a9993f1dcadf1dd6ca43b23348abe374605d29945a2fafc07fb3457644e6a54",
"zh:b1b9a1e6bcc24d5863a664a411d2dc906373ae7a2399d2d65548ce7377057852",
"zh:b270184cdeec277218e84b94cb136fead753da717f9b9dc378e51907f3f00bb0",
"zh:dff2bc10071210181726ce270f954995fe42c696e61e2e8f874021fed02521e5",
"zh:e8e87b40b6a87dc097b0fdc20d3f725cec0d82abc9cc3755c1f89f8f6e8b0036",
"zh:ee964a6573d399a5dd22ce328fb38ca1207797a02248f14b2e4913ee390e7803",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.52.5"
version = "4.52.0"
}
}
}


@@ -16,31 +16,23 @@ name: immich-dev
services:
immich-server:
container_name: immich_server
command: ['immich-dev']
command: ['/usr/src/app/bin/immich-dev']
image: immich-server-dev:latest
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
build:
context: ../
dockerfile: server/Dockerfile.dev
dockerfile: server/Dockerfile
target: dev
restart: unless-stopped
volumes:
- ..:/usr/src/app
- ${UPLOAD_LOCATION}/photos:/data
- ../server:/usr/src/app
- ../open-api:/usr/src/open-api
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
- ${UPLOAD_LOCATION}/photos/upload:/usr/src/app/upload/upload
- /usr/src/app/node_modules
- /etc/localtime:/etc/localtime:ro
- pnpm-store:/usr/src/app/.pnpm-store
- server-node_modules:/usr/src/app/server/node_modules
- web-node_modules:/usr/src/app/web/node_modules
- github-node_modules:/usr/src/app/.github/node_modules
- cli-node_modules:/usr/src/app/cli/node_modules
- docs-node_modules:/usr/src/app/docs/node_modules
- e2e-node_modules:/usr/src/app/e2e/node_modules
- sdk-node_modules:/usr/src/app/open-api/typescript-sdk/node_modules
- app-node_modules:/usr/src/app/node_modules
- sveltekit:/usr/src/app/web/.svelte-kit
- coverage:/usr/src/app/web/coverage
env_file:
- .env
environment:
@@ -56,7 +48,7 @@ services:
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/community-guides
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
@@ -66,47 +58,37 @@ services:
- 9231:9231
- 2283:2283
depends_on:
redis:
condition: service_started
database:
condition: service_started
- redis
- database
healthcheck:
disable: false
immich-web:
container_name: immich_web
image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build:
context: ../
dockerfile: server/Dockerfile.dev
target: dev
command: ['immich-web']
context: ../web
command: ['/usr/src/app/bin/immich-web']
env_file:
- .env
ports:
- 3000:3000
- 24678:24678
volumes:
- ..:/usr/src/app
- pnpm-store:/usr/src/app/.pnpm-store
- server-node_modules:/usr/src/app/server/node_modules
- web-node_modules:/usr/src/app/web/node_modules
- github-node_modules:/usr/src/app/.github/node_modules
- cli-node_modules:/usr/src/app/cli/node_modules
- docs-node_modules:/usr/src/app/docs/node_modules
- e2e-node_modules:/usr/src/app/e2e/node_modules
- sdk-node_modules:/usr/src/app/open-api/typescript-sdk/node_modules
- app-node_modules:/usr/src/app/node_modules
- sveltekit:/usr/src/app/web/.svelte-kit
- coverage:/usr/src/app/web/coverage
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
# - ../../ui:/usr/ui
- /usr/src/app/node_modules
ulimits:
nofile:
soft: 1048576
hard: 1048576
restart: unless-stopped
depends_on:
immich-server:
condition: service_started
- immich-server
immich-machine-learning:
container_name: immich_machine_learning
@@ -134,13 +116,13 @@ services:
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
image: docker.io/valkey/valkey:8-bookworm@sha256:42cba146593a5ea9a622002c1b7cba5da7be248650cbb64ecb9c6c33d29794b1
healthcheck:
test: redis-cli ping || exit 1
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:c44be5f2871c59362966d71eab4268170eb6f5653c0e6170184e72b38ffdf107
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
env_file:
- .env
environment:
@@ -152,7 +134,25 @@ services:
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
ports:
- 5432:5432
shm_size: 128mb
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
@@ -178,14 +178,3 @@ volumes:
model-cache:
prometheus-data:
grafana-data:
pnpm-store:
server-node_modules:
web-node_modules:
github-node_modules:
cli-node_modules:
docs-node_modules:
e2e-node_modules:
sdk-node_modules:
app-node_modules:
sveltekit:
coverage:
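One subtlety in the postgres healthcheck blocks above: docker compose interpolates `${...}` on the host, so `$$` is needed to pass a literal `$` through to the shell inside the container. A reduced sketch of just that piece:

```yaml
healthcheck:
  # $${POSTGRES_DB} reaches the container shell as ${POSTGRES_DB};
  # a single $ would be interpolated by docker compose before startup.
  test: pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1
  interval: 5m
```

The full check then goes further, summing `checksum_failures` from `pg_stat_database` and failing if the count is ever non-zero, which turns silent on-disk corruption into an unhealthy container.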


@@ -20,7 +20,7 @@ services:
context: ../
dockerfile: server/Dockerfile
volumes:
- ${UPLOAD_LOCATION}/photos:/data
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
@@ -56,14 +56,14 @@ services:
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
image: docker.io/valkey/valkey:8-bookworm@sha256:42cba146593a5ea9a622002c1b7cba5da7be248650cbb64ecb9c6c33d29794b1
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:c44be5f2871c59362966d71eab4268170eb6f5653c0e6170184e72b38ffdf107
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
env_file:
- .env
environment:
@@ -75,7 +75,14 @@ services:
- ${UPLOAD_LOCATION}/postgres:/var/lib/postgresql/data
ports:
- 5432:5432
shm_size: 128mb
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
@@ -83,7 +90,7 @@ services:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:63805ebb8d2b3920190daf1cb14a60871b16fd38bed42b857a3182bc621f4996
image: prom/prometheus@sha256:502ad90314c7485892ce696cb14a99fceab9fc27af29f4b427f41bd39701a199
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -92,10 +99,10 @@ services:
# add data source for http://immich-prometheus:9090 to get started
immich-grafana:
container_name: immich_grafana
command: ['./run.sh', '-disable-reporting']
command: [ './run.sh', '-disable-reporting' ]
ports:
- 3000:3000
image: grafana/grafana:12.1.1-ubuntu@sha256:d1da838234ff2de93e0065ee1bf0e66d38f948dcc5d718c25fa6237e14b4424a
image: grafana/grafana:11.6.0-ubuntu@sha256:fd8fa48213c624e1a95122f1d93abbf1cf1cbe85fc73212c1e599dbd76c63ff8
volumes:
- grafana-data:/var/lib/grafana


@@ -18,7 +18,7 @@ services:
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
@@ -49,25 +49,30 @@ services:
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
image: docker.io/valkey/valkey:8-bookworm@sha256:42cba146593a5ea9a622002c1b7cba5da7be248650cbb64ecb9c6c33d29794b1
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:c44be5f2871c59362966d71eab4268170eb6f5653c0e6170184e72b38ffdf107
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always
volumes:

docs/.gitignore

@@ -18,6 +18,4 @@
npm-debug.log*
yarn-debug.log*
yarn-error.log*
yarn.lock
/static/openapi.json
yarn.lock


@@ -1 +1 @@
22.19.0
22.14.0


@@ -5,13 +5,13 @@ This website is built using [Docusaurus](https://docusaurus.io/), a modern stati
### Installation
```
$ pnpm install
$ npm install
```
### Local Development
```
$ pnpm run start
$ npm run start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
@@ -19,7 +19,7 @@ This command starts a local development server and opens up a browser window. Mo
### Build
```
$ pnpm run build
$ npm run build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
@@ -29,13 +29,13 @@ This command generates static content into the `build` directory and can be serv
Using SSH:
```
$ USE_SSH=true pnpm run deploy
$ USE_SSH=true npm run deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> pnpm run deploy
$ GIT_USER=<Your GitHub username> npm run deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.


@@ -1,31 +1,5 @@
# FAQ
## Commercial Guidelines
### Are you open to commercial partnerships and collaborations?
We are working to commercialize Immich and we'd love for you to help us by making Immich better. FUTO is dedicated to developing sustainable models for developing open source software for our customers. We want our customers to be delighted by the products our engineers deliver, and we want our engineers to be paid when they succeed.
If you wish to use Immich in a commercial product not owned by FUTO, we have the following requirements:
- Plugin Integrations: Integrations for other platforms are typically approved, provided proper notification is given.
- Reseller Partnerships: Must adhere to the guidelines outlined below regarding trademark usage, and proper representation.
- Strategic Collaborations: We welcome discussions about mutually beneficial partnerships that enhance the value proposition for both organizations.
### What are your guidelines for resellers and trademark usage?
For organizations seeking to resell Immich, we have established the following guidelines to protect our brand integrity and ensure proper representation.
- We request that resellers do not display our trademarks on their websites or marketing materials. If such usage is discovered, we will contact you to request removal.
- Do not misrepresent your reseller site or services as being officially affiliated with or endorsed by Immich or our development team.
- For small resellers who wish to contribute financially to Immich's development, we recommend directing your customers to purchase licenses directly from us rather than attempting to broker revenue-sharing arrangements. We ask that you refrain from misrepresenting reseller activities as directly supporting our development work.
When in doubt or if you have an edge case scenario, we encourage you to contact us directly via email to discuss the use of our trademark. We can provide clear guidance on what is acceptable and what is not. You can reach out at: questions@immich.app
## User
### How can I reset the admin password?
@@ -206,7 +180,7 @@ services:
...
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
+ - originals:/usr/src/app/originals
...
@@ -516,7 +490,7 @@ You can also scan the Postgres database file structure for errors:
<details>
<summary>Scan for file structure errors</summary>
```bash
docker exec -it immich_postgres pg_amcheck --username=<DB_USERNAME> --heapallindexed --parent-check --rootdescend --progress --all --install-missing
docker exec -it immich_postgres pg_amcheck --username=postgres --heapallindexed --parent-check --rootdescend --progress --all --install-missing
```
A normal result will end something like this and return with an exit code of `0`:


@@ -23,32 +23,23 @@ Refer to the official [postgres documentation](https://www.postgresql.org/docs/c
It is not recommended to directly back up the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Dumps
### Automatic Database Backups
:::warning
The automatic database dumps can be used to restore the database in the event of damage to the Postgres database files.
There is no monitoring for these dumps and you will not be notified if they are unsuccessful.
:::
For convenience, Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
As mentioned above, you should make your own backup of these together with the asset folders as noted below.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
:::caution
The database dumps do **NOT** contain any pictures or videos, only metadata. They are only usable with a copy of the other files in `UPLOAD_LOCATION` as outlined below.
:::
#### Trigger Backup
For disaster-recovery purposes, Immich will automatically create database dumps. The dumps are stored in `UPLOAD_LOCATION/backups`.
Please be sure to make your own, independent backup of the database together with the asset folders as noted below.
You can adjust the schedule and amount of kept database dumps in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 database dumps and create a new dump every day at 2:00 AM.
#### Trigger Dump
You are able to trigger a database dump in the [admin job status page](http://my.immich.app/admin/jobs-status).
Visit the page, open the "Create job" modal from the top right, select "Create Database Dump" and click "Confirm".
A job will run and trigger a dump; you can verify this worked correctly by checking the logs or the `backups/` folder.
These dumps will count towards the last `X` dumps that will be kept based on your settings.
You are able to trigger a backup in the [admin job status page](http://my.immich.app/admin/jobs-status).
Visit the page, open the "Create job" modal from the top right, select "Backup Database" and click "Confirm".
A job will run and trigger a backup; you can verify this worked correctly by checking the logs or the backup folder.
This backup will count towards the last X backups that will be kept based on your settings.
#### Restoring
We hope to make restoring simpler in future versions; for now, you can find the database dumps in the `UPLOAD_LOCATION/backups` folder on your host.
We hope to make restoring simpler in future versions; for now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then please follow the steps in the following section for restoring the database.
### Manual Backup and Restore
@@ -57,7 +48,7 @@ Then please follow the steps in the following section for restoring the database
<TabItem value="Linux system" label="Linux system" default>
```bash title='Backup'
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=<DB_USERNAME> | gzip > "/path/to/backup/dump.sql.gz"
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | gzip > "/path/to/backup/dump.sql.gz"
```
```bash title='Restore'
@@ -79,7 +70,7 @@ docker compose up -d # Start remainder of Immich apps
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=<DB_USERNAME>))
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres))
```
```powershell title='Restore'
@@ -150,10 +141,12 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
- Preview images (small thumbnails and large previews) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `DB_DATA_LOCATION`.
@@ -199,6 +192,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Temporarily located in `UPLOAD_LOCATION/upload/<userID>`.
- Transferred to `UPLOAD_LOCATION/library/<userID>` upon successful upload.
- **Postgres**
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `DB_DATA_LOCATION`.
@@ -216,10 +210,3 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
Do not touch the files inside these folders under any circumstances except taking a backup. Changing or removing an asset can cause untracked and missing files.
You can think of it as App-Which-Must-Not-Be-Named, the only access to viewing, changing and deleting assets is only through the mobile or browser interface.
:::
## Backup ordering
A backup of Immich should contain both the database and the asset files. When backing these up it's possible for them to get out of sync, potentially resulting in broken assets after you restore.
The best way of dealing with this is to stop the immich-server container while you take a backup. If nothing is changing then the backup will always be in sync.
If stopping the container is not an option, then the recommended order is to back up the database first, and the filesystem second. This way, the worst case scenario is that there are files on the filesystem that the database doesn't know about. If necessary, these can be (re)uploaded manually after a restore. If the backup is done the other way around, with the filesystem first and the database second, it's possible for the restored database to reference files that aren't in the filesystem backup, thus resulting in broken assets.
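As a concrete sketch of that ordering, the script below dumps the database first and copies the asset files second. It reuses the `pg_dumpall` invocation from the manual backup section above; the destination paths and the use of `rsync` are assumptions to adapt to your own setup.
```bash
#!/usr/bin/env bash
# Sketch: back up the database first, then the filesystem.
set -euo pipefail

BACKUP_DIR="/path/to/backup"       # assumed destination, adjust as needed
UPLOAD_LOCATION="/path/to/library" # your UPLOAD_LOCATION value

# 1. Dump the database (metadata) first
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=<DB_USERNAME> \
  | gzip > "${BACKUP_DIR}/dump.sql.gz"

# 2. Copy the asset files second; at worst the copy contains files
#    the database dump does not yet know about
rsync -a "${UPLOAD_LOCATION}/" "${BACKUP_DIR}/library/"
```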


@@ -46,12 +46,6 @@ services:
When a new asset is uploaded it kicks off a series of jobs, which include metadata extraction, thumbnail generation, machine learning tasks, and storage template migration, if enabled. To view the status of a job navigate to the Administration -> Jobs page.
Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed.
<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />
Additionally, some jobs (such as memories generation) run on a schedule, which is every night at midnight by default. To change when they run or enable/disable a job navigate to System Settings -> [Nightly Tasks Settings](https://my.immich.app/admin/system-settings?isOpen=nightly-tasks).
<img src={require('./img/admin-nightly-tasks.webp').default} width="60%" title="Admin nightly tasks" />
:::note
Some jobs ([External Libraries](/docs/features/libraries) scanning, Database Dump) are configured in their own sections in System Settings.
:::


@@ -10,7 +10,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://integrations.goauthentik.io/media/immich/)
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)
@@ -20,6 +20,7 @@ Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an i
Before enabling OAuth in Immich, a new client application needs to be configured in the 3rd-party authentication server. While the specifics of this setup vary from provider to provider, the general approach should be the same.
1. Create a new (Client) Application
1. The **Provider** type should be `OpenID Connect` or `OAuth2`
2. The **Client type** should be `Confidential`
3. The **Application** type should be `Web`
@@ -28,6 +29,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured
2. Configure Redirect URIs/Origins
The **Sign-in redirect URIs** should include:
- `app.immich:///oauth-callback` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `http://DOMAIN:PORT/auth/login` - for logging in with OAuth from the Web Client
- `http://DOMAIN:PORT/user-settings` - for manually linking OAuth in the Web Client
@@ -35,17 +37,21 @@ Before enabling OAuth in Immich, a new client application needs to be configured
Redirect URIs should contain all the domains you will be using to access Immich. Some examples include:
Mobile
- `app.immich:///oauth-callback` (You **MUST** include this for iOS and Android mobile apps to work properly)
Localhost
- `http://localhost:2283/auth/login`
- `http://localhost:2283/user-settings`
Local IP
- `http://192.168.0.200:2283/auth/login`
- `http://192.168.0.200:2283/user-settings`
Hostname
- `https://immich.example.com/auth/login`
- `https://immich.example.com/user-settings`
@@ -62,9 +68,8 @@ Once you have a new OAuth client application configured, Immich can be configure
| Scope | string | openid email profile | Full list of scopes to send with the request (space delimited) |
| Signing Algorithm | string | RS256 | The algorithm used to sign the id token (examples: RS256, HS256) |
| Storage Label Claim | string | preferred_username | Claim mapping for the user's storage label**¹** |
| Role Claim | string | immich_role | Claim mapping for the user's role. (should return "user" or "admin")**¹** |
| Storage Quota Claim | string | immich_quota | Claim mapping for the user's storage**¹** |
| Default Storage Quota (GiB) | number | 0 | Default quota for user without storage quota claim (empty for unlimited quota) |
| Default Storage Quota (GiB) | number | 0 | Default quota for user without storage quota claim (Enter 0 for unlimited quota) |
| Button Text | string | Login with OAuth | Text for the OAuth button on the web |
| Auto Register | boolean | true | When true, will automatically register a user the first time they sign in |
| [Auto Launch](#auto-launch) | boolean | false | When true, will skip the login page and automatically start the OAuth login process |
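For illustration, a decoded ID token that exercises the claim mappings above might carry a payload like the following (claim names and values are examples; what your provider actually emits depends on its configuration):
```json
{
  "sub": "248289761001",
  "email": "user@example.com",
  "preferred_username": "user1",
  "immich_role": "user",
  "immich_quota": 500
}
```
With the default claim settings above, this user would get the storage label `user1`, the `user` role, and a 500 GiB quota.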
@@ -88,7 +93,6 @@ The `.well-known/openid-configuration` part of the url is optional and will be a
## Auto Launch
When Auto Launch is enabled, the login page will automatically redirect the user to the OAuth authorization url, to login with OAuth. To access the login screen again, use the browser's back button, or navigate directly to `/auth/login?autoLaunch=0`.
Auto Launch can also be enabled on a per-request basis by navigating to `/auth/login?autoLaunch=1`; this can be useful in situations where Immich is called from e.g. Nextcloud using the _External sites_ app and the _oidc_ app so as to enable users to directly interact with a logged-in instance of Immich.
## Mobile Redirect URI
@@ -106,89 +110,6 @@ Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to
## Example Configuration
<details>
<summary>Authelia Example</summary>
### Authelia Example
Here's an example of OAuth configured for Authelia:
This assumes there exists an attribute `immichquota` in the user schema, which is used to set the user's storage quota in Immich.
The configuration concerning the quota is optional.
```yaml
authentication_backend:
ldap:
# The LDAP server configuration goes here.
# See: https://www.authelia.com/c/ldap
attributes:
extra:
immichquota: # The attribute name from LDAP
name: 'immich_quota'
multi_valued: false
value_type: 'integer'
identity_providers:
oidc:
## The other portions of the mandatory OpenID Connect 1.0 configuration go here.
## See: https://www.authelia.com/c/oidc
claims_policies:
immich_policy:
custom_claims:
immich_quota:
attribute: 'immich_quota'
scopes:
immich_scope:
claims:
- 'immich_quota'
clients:
- client_id: 'immich'
client_name: 'Immich'
# https://www.authelia.com/integration/openid-connect/frequently-asked-questions/#how-do-i-generate-a-client-identifier-or-client-secret
client_secret: '$pbkdf2-sha512$310000$c8p78n7pUMln0jzvd4aK4Q$JNRBzwAo0ek5qKn50cFzzvE9RXV88h1wJn5KGiHrD0YKtZaR/nCb2CJPOsKaPK0hjf.9yHxzQGZziziccp6Yng'
public: false
require_pkce: false
redirect_uris:
- 'https://example.immich.app/auth/login'
- 'https://example.immich.app/user-settings'
- 'app.immich:///oauth-callback'
scopes:
- 'openid'
- 'profile'
- 'email'
- 'immich_scope'
claims_policy: 'immich_policy'
response_types:
- 'code'
grant_types:
- 'authorization_code'
id_token_signed_response_alg: 'RS256'
userinfo_signed_response_alg: 'RS256'
token_endpoint_auth_method: 'client_secret_post'
```
Configuration of OAuth in Immich System Settings
| Setting | Value |
| ---------------------------------- | ------------------------------------------------------------------- |
| Issuer URL | `https://example.immich.app/.well-known/openid-configuration` |
| Client ID | immich |
| Client Secret | 0v89FXkQOWO\***\*\*\*\*\***\*\*\***\*\*\*\*\***mprbvXD549HH6s1iw... |
| Token Endpoint Auth Method | client_secret_post |
| Scope | openid email profile immich_scope |
| ID Token Signed Response Algorithm | RS256 |
| Userinfo Signed Response Algorithm | RS256 |
| Storage Label Claim | uid |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (empty for unlimited quota) |
| Button Text | Sign in with Authelia (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled (optional) |
| Mobile Redirect URI Override | Disable |
| Mobile Redirect URI | |
</details>
<details>
<summary>Authentik Example</summary>
@@ -211,7 +132,7 @@ Configuration of OAuth in Immich System Settings
| Signing Algorithm | RS256 |
| Storage Label Claim | preferred_username |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (empty for unlimited quota) |
| Default Storage Quota (GiB) | 0 (0 for unlimited quota) |
| Button Text | Sign in with Authentik (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled (optional) |
@@ -242,7 +163,7 @@ Configuration of OAuth in Immich System Settings
| Signing Algorithm | RS256 |
| Storage Label Claim | preferred_username |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (empty for unlimited quota) |
| Default Storage Quota (GiB) | 0 (0 for unlimited quota) |
| Button Text | Sign in with Google (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled |


@@ -10,16 +10,12 @@ Running with a pre-existing Postgres server can unlock powerful administrative f
## Prerequisites
You must install `pgvector` (`>= 0.7.0, < 1.0.0`), as it is a prerequisite for `vchord`.
The easiest way to do this on Debian/Ubuntu is by adding the [PostgreSQL Apt repository][pg-apt] and then
running `apt install postgresql-NN-pgvector`, where `NN` is your Postgres version (e.g., `16`).
You must install VectorChord into your instance of Postgres using their [instructions][vchord-install]. After installation, add `shared_preload_libraries = 'vchord.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vchord.so'`.
You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
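On a Debian/Ubuntu host, the prerequisite steps described above might look like the following sketch (this assumes Postgres 16 and the PostgreSQL Apt repository; substitute your own version number):
```bash
# Install pgvector from the PostgreSQL Apt repository (Postgres 16 assumed)
sudo apt install postgresql-16-pgvector

# After installing VectorChord per its instructions, enable it in postgresql.conf:
#   shared_preload_libraries = 'vchord.so'
# then restart Postgres for the setting to take effect
sudo systemctl restart postgresql
```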
:::note
Immich is known to work with Postgres versions `>= 14, < 18`.
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.
Make sure the installed version of VectorChord is compatible with your version of Immich. The current accepted range for VectorChord is `>= 0.3.0, < 0.5.0`.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
:::
## Specifying the connection URL
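Immich connects to an external server through a standard Postgres connection URL supplied via the environment. A sketch with placeholder credentials, assuming the `DB_URL` variable:
```
DB_URL=postgresql://immichdbusername:immichdbpassword@postgresserver:5432/immichdatabasename
```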
@@ -57,99 +53,21 @@ CREATE DATABASE <immichdatabasename>;
\c <immichdatabasename>
BEGIN;
ALTER DATABASE <immichdatabasename> OWNER TO <immichdbusername>;
CREATE EXTENSION vchord CASCADE;
CREATE EXTENSION vectors;
CREATE EXTENSION earthdistance CASCADE;
ALTER DATABASE <immichdatabasename> SET search_path TO "$user", public, vectors;
ALTER SCHEMA vectors OWNER TO <immichdbusername>;
COMMIT;
```
### Updating VectorChord
### Updating pgvecto.rs
When installing a new version of VectorChord, you will need to manually update the extension and reindex by connecting to the Immich database and running:
When installing a new version of pgvecto.rs, you will need to manually update the extension by connecting to the Immich database and running `ALTER EXTENSION vectors UPDATE;`.
```
ALTER EXTENSION vchord UPDATE;
REINDEX INDEX face_index;
REINDEX INDEX clip_index;
```
### Common errors
## Migrating to VectorChord
#### Permission denied for view
VectorChord is the successor extension to pgvecto.rs, allowing for higher performance, lower memory usage and higher quality results for smart search and facial recognition.
If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;`.
### Migrating from pgvecto.rs
Support for pgvecto.rs will be dropped in a later release; hence, we recommend all users currently using pgvecto.rs to migrate to VectorChord at their convenience. There are two primary approaches to do so.
The easiest option is to have both extensions installed during the migration:
<details>
<summary>Migration steps (automatic)</summary>
1. Ensure you still have pgvecto.rs installed
2. Install `pgvector` (`>= 0.7.0, < 1.0.0`). The easiest way to do this on Debian/Ubuntu is by adding the [PostgreSQL Apt repository][pg-apt] and then running `apt install postgresql-NN-pgvector`, where `NN` is your Postgres version (e.g., `16`)
3. [Install VectorChord][vchord-install]
4. Add `shared_preload_libraries= 'vchord.so, vectors.so'` to your `postgresql.conf`, making sure to include _both_ `vchord.so` and `vectors.so`. You may include other libraries here as well if needed
5. Restart the Postgres database
6. If Immich does not have superuser permissions, run the SQL command `CREATE EXTENSION vchord CASCADE;` using psql or your choice of database client
7. Start Immich and wait for the logs `Reindexed face_index` and `Reindexed clip_index` to be output
8. If Immich does not have superuser permissions, run the SQL command `DROP EXTENSION vectors;`
9. Drop the old schema by running `DROP SCHEMA vectors;`
10. Remove the `vectors.so` entry from the `shared_preload_libraries` setting
11. Restart the Postgres database
12. Uninstall pgvecto.rs (e.g. `apt-get purge vectors-pg14` on Debian-based environments, replacing `pg14` as appropriate). `pgvector` must remain installed as it provides the data types used by `vchord`
</details>
If it is not possible to have both VectorChord and pgvecto.rs installed at the same time, you can perform the migration with more manual steps:
<details>
<summary>Migration steps (manual)</summary>
1. While pgvecto.rs is still installed, run the following SQL command using psql or your choice of database client. Take note of the number outputted by this command as you will need it later
```sql
SELECT atttypmod as dimsize
FROM pg_attribute f
JOIN pg_class c ON c.oid = f.attrelid
WHERE c.relkind = 'r'::char
AND f.attnum > 0
AND c.relname = 'smart_search'::text
AND f.attname = 'embedding'::text;
```
2. Remove references to pgvecto.rs using the below SQL commands
```sql
DROP INDEX IF EXISTS clip_index;
DROP INDEX IF EXISTS face_index;
ALTER TABLE smart_search ALTER COLUMN embedding SET DATA TYPE real[];
ALTER TABLE face_search ALTER COLUMN embedding SET DATA TYPE real[];
```
3. [Install VectorChord][vchord-install]
4. Change the columns back to the appropriate vector types, replacing `<number>` with the number from step 1
```sql
CREATE EXTENSION IF NOT EXISTS vchord CASCADE;
ALTER TABLE smart_search ALTER COLUMN embedding SET DATA TYPE vector(<number>);
ALTER TABLE face_search ALTER COLUMN embedding SET DATA TYPE vector(512);
```
5. Start Immich and let it create new indices using VectorChord
</details>
### Migrating from pgvector
<details>
<summary>Migration steps</summary>
1. Ensure you have at least 0.7.0 of pgvector installed. If it is below that, please upgrade it and run the SQL command `ALTER EXTENSION vector UPDATE;` using psql or your choice of database client
2. Follow the Prerequisites to install VectorChord
3. If Immich does not have superuser permissions, run the SQL command `CREATE EXTENSION vchord CASCADE;`
4. Remove the `DB_VECTOR_EXTENSION=pgvector` environmental variable as it will make Immich still use pgvector if set
5. Start Immich and let it create new indices using VectorChord
</details>
Note that VectorChord itself uses pgvector types, so you should not uninstall pgvector after following these steps.
[vchord-install]: https://docs.vectorchord.ai/vectorchord/getting-started/installation.html
[pg-apt]: https://www.postgresql.org/download/linux/#generic
[vectors-install]: https://docs.vectorchord.ai/getting-started/installation.html


@@ -2,6 +2,10 @@
Users can deploy a custom reverse proxy that forwards requests to Immich. This way, the reverse proxy can handle TLS termination, load balancing, or other advanced features. All reverse proxies between Immich and the user must forward all headers and set the `Host`, `X-Real-IP`, `X-Forwarded-Proto` and `X-Forwarded-For` headers to their appropriate values. Additionally, your reverse proxy should allow sufficiently large uploads. By following these practices, you ensure that all custom reverse proxies are fully compatible with Immich.
:::note
The Repair page can take a long time to load. To avoid server timeouts or errors, we recommend specifying a timeout of at least 10 minutes on your proxy server.
:::
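For Nginx, one way to apply such a timeout is with the standard proxy timeout directives; 600 seconds below is an example value matching the 10 minutes suggested above:
```nginx
proxy_read_timeout 600s;
proxy_send_timeout 600s;
send_timeout       600s;
```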
:::caution
Immich does not support being served on a sub-path such as `location /immich {`. It has to be served on the root path of a (sub)domain.
:::
@@ -18,7 +22,7 @@ server {
client_max_body_size 50000M;
# Set headers
proxy_set_header Host $host;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;


@@ -2,17 +2,16 @@
The `immich-server` docker image comes preinstalled with an administrative CLI (`immich-admin`) that supports the following commands:
| Command | Description |
| ------------------------ | ------------------------------------------------------------- |
| `help` | Display help |
| `reset-admin-password` | Reset the password for the admin user |
| `disable-password-login` | Disable password login |
| `enable-password-login` | Enable password login |
| `enable-oauth-login` | Enable OAuth login |
| `disable-oauth-login` | Disable OAuth login |
| `list-users` | List Immich users |
| `version` | Print Immich version |
| `change-media-location` | Change database file paths to align with a new media location |
| Command | Description |
| ------------------------ | ------------------------------------- |
| `help` | Display help |
| `reset-admin-password` | Reset the password for the admin user |
| `disable-password-login` | Disable password login |
| `enable-password-login` | Enable password login |
| `enable-oauth-login` | Enable OAuth login |
| `disable-oauth-login` | Disable OAuth login |
| `list-users` | List Immich users |
| `version` | Print Immich version |
## How to run a command
@@ -89,21 +88,3 @@ Print Immich Version
immich-admin version
v1.129.0
```
Change media location
```
immich-admin change-media-location
? Enter the previous value of IMMICH_MEDIA_LOCATION: /data
? Enter the new value of IMMICH_MEDIA_LOCATION: /my-data
...
Previous value: /data
Current value: /my-data
Changing database paths from "/data/*" to "/my-data/*"
? Do you want to proceed? [Y/n] y
Database file paths updated successfully! 🎉
...
```


@@ -1,14 +1,14 @@
# Database Migrations
After making any changes in the `server/src/schema`, a database migration needs to run in order to register the changes in the database. Follow the steps below to create a new migration.
After making any changes in the `server/src/entities`, a database migration needs to run in order to register the changes in the database. Follow the steps below to create a new migration.
1. Run the command
```bash
pnpm run migrations:generate <migration-name>
npm run typeorm:migrations:generate <migration-name>
```
2. Check if the migration file makes sense.
3. Move the migration file to the folder `./server/src/schema/migrations` in your code editor.
3. Move the migration file to the folder `./server/src/migrations` in your code editor.
The server will automatically detect `*.ts` file changes and restart. Part of the server start-up process includes running any new migrations, so they will be applied immediately.
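Putting the steps together, a typical run might look like this (the migration name here is hypothetical):
```bash
cd server
pnpm run migrations:generate AddExampleColumn
# review the generated file, then move it into place:
mv src/<generated-migration-file>.ts src/schema/migrations/
```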


@@ -1,480 +0,0 @@
---
title: Devcontainers
sidebar_position: 3
---
# Development with Dev Containers
Dev Containers provide a consistent, reproducible development environment using Docker containers. With a single click, you can get started with an Immich development environment on Mac, Linux, Windows, or in the cloud using GitHub Codespaces.
Get started fast!
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/immich-app/immich/)
[Learn more about Dev Containers](https://docs.github.com/en/codespaces/setting-up-your-project-for-codespaces/adding-a-dev-container-configuration/introduction-to-dev-containers)
## Prerequisites
Before getting started, ensure you have:
- **Docker Desktop** (latest version)
- [Mac](https://docs.docker.com/desktop/install/mac-install/)
- [Windows](https://docs.docker.com/desktop/install/windows-install/) (with WSL2 backend recommended)
- [Linux](https://docs.docker.com/desktop/install/linux-install/)
- **Visual Studio Code** with the [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
- **Git** for cloning the repository
- At least **8GB of RAM** (16GB recommended)
- **20GB of free disk space**
:::tip Alternative Development Environments
While this guide focuses on VS Code, you have many options for Dev Container development:
**Local Editors:**
- [IntelliJ IDEA](https://www.jetbrains.com/help/idea/connect-to-devcontainer.html) - Full JetBrains IDE support
- [neovim](https://github.com/jamestthompson3/nvim-remote-containers) - Lightweight terminal-based editor
- [Emacs](https://github.com/emacs-lsp/lsp-docker) - Extensible text editor
- [DevContainer CLI](https://github.com/devcontainers/cli) - Command-line interface
**Cloud-Based Solutions:**
- [GitHub Codespaces](https://github.com/features/codespaces) - Fully integrated with GitHub, excellent devcontainer.json support
- [GitPod](https://www.gitpod.io) - SaaS platform with recent Dev Container support (historically used gitpod.yml)
**Self-Hostable Options:**
- [Coder](https://coder.com) - Enterprise-focused, requires Terraform knowledge, self-managed
- [DevPod](https://devpod.sh) - Client-only tool with excellent devcontainer.json support, works with any provider (local, cloud, or on-premise)
:::
## Dev Container Services
The Dev Container environment consists of the following services:
| Service | Container Name | Description | Ports |
| ---------------- | ------------------------- | --------------------------------------------------------- | ----------------------------------------------------------------------- |
| Server & Web | `immich-server` | Runs both API server and web frontend in development mode | 2283 (API)<br/>3000 (Web)<br/>9230 (Workers Debug)<br/>9231 (API Debug) |
| Database | `database` | PostgreSQL database | 5432 |
| Cache | `redis` | Valkey cache server | 6379 |
| Machine Learning | `immich-machine-learning` | Immich ML model inference server | 3003 |
## Getting Started
### Step 1: Clone the Repository
```bash
git clone https://github.com/immich-app/immich.git
cd immich
```
### Step 2: Configure Environment Variables
The Immich Dev Containers read environment variables from your shell environment, not from `.env` files. This allows them to work in cloud environments without pre-configuration.
:::important Configuration
When running locally, if you want to create (or use an existing) DB and/or photo storage folder, you must set the `UPLOAD_LOCATION` variable in your shell environment before launching the Dev Container. This determines where uploaded files are stored and also where the DB stores its data.
```bash
# Set temporarily for current session
export UPLOAD_LOCATION=/opt/dev_upload_folder
# Or add to your shell profile for persistence
# (~/.bashrc, ~/.zshrc, ~/.bash_profile, etc.)
echo 'export UPLOAD_LOCATION=/opt/dev_upload_folder' >> ~/.bashrc
source ~/.bashrc
```
:::
### Step 3: Launch the Dev Container
:::tip
Immich development makes extensive use of specialized [base images](https://github.com/immich-app/base-images) for its docker-compose based development. For this reason, you won't be able to use VSCode's **_Clone Repository in a Container Volume_** command.
:::
#### Using VS Code UI:
1. Open the cloned repository in VS Code
2. Press `F1` or `Ctrl/Cmd+Shift+P` to open the command palette
3. Type and select "Dev Containers: Rebuild and Reopen in Container"
4. Select "Immich - Backend, Frontend and ML" from the list
5. Wait for the container to build and start (this may take several minutes on first run)
#### Using VS Code Quick Actions:
1. Open the repository in VS Code
2. You should see a popup asking if you want to reopen in a container
3. Click "Reopen in Container"
#### Using Command Line:
```bash
# Using the DevContainer CLI
devcontainer up --workspace-folder .
```
## Environment Variable Details
### How Dev Containers Handle Environment Variables
Unlike the Immich developer setup based on Docker Compose which uses `.env` files, Immich Dev Containers read environment variables from your shell environment. This is configured in `.devcontainer/devcontainer.json`:
```json
"remoteEnv": {
"UPLOAD_LOCATION": "${localEnv:UPLOAD_LOCATION:./Library}",
"DB_PASSWORD": "${localEnv:DB_PASSWORD:postgres}",
"DB_USERNAME": "${localEnv:DB_USERNAME:postgres}",
"DB_DATABASE_NAME": "${localEnv:DB_DATABASE_NAME:immich}"
}
```
The `${localEnv:VARIABLE:default}` syntax reads from your shell environment with optional defaults.
### Upload Location Path Resolution
The `UPLOAD_LOCATION` environment variable controls where files are stored:
**Default:** `./Library` (relative to the `docker` directory)
**Resolved to:** `<immich-root>/docker/Library`
**Bind Mounts Created:**
```yaml
# From .devcontainer/server/container-compose-overrides.yml
- ${UPLOAD_LOCATION-./Library}/photos:/workspaces/immich/server/upload
- ${UPLOAD_LOCATION-./Library}/postgres:/var/lib/postgresql/data
```
### Database Configuration
These variables have sensible defaults (for development) but can be customized:
| Variable | Default | Description |
| ------------------ | ---------- | ------------------- |
| `DB_PASSWORD` | `postgres` | PostgreSQL password |
| `DB_USERNAME` | `postgres` | PostgreSQL username |
| `DB_DATABASE_NAME` | `immich` | Database name |
### Setting Environment Variables
Add these to your shell profile (`~/.bashrc`, `~/.zshrc`, `~/.bash_profile`, etc.):
```bash
# Required
export UPLOAD_LOCATION=./Library # or absolute path
# Optional (only if using non-default values)
export DB_PASSWORD=your_password
export DB_USERNAME=your_username
export DB_DATABASE_NAME=your_database
```
Remember to reload your shell configuration:
```bash
source ~/.bashrc # or ~/.zshrc, etc.
```
## Git Configuration
### SSH Keys and Authentication
To use your SSH keys for GitHub access inside the Dev Container:
1. **Start SSH Agent** on your host machine:
```bash
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_rsa # or your key path
```
2. **VS Code automatically forwards your SSH agent** to the container
For detailed instructions, see the [VS Code guide on sharing Git credentials](https://code.visualstudio.com/remote/advancedcontainers/sharing-git-credentials).
### Commit Signing
To use your SSH key for commit signing, see the [GitHub guide on SSH commit signing](https://docs.github.com/en/authentication/managing-commit-signature-verification/telling-git-about-your-signing-key#telling-git-about-your-ssh-key).
## Development Workflow
### Automatic Setup
When the Dev Container starts, it automatically:
1. **Runs post-create script** (`container-server-post-create.sh`):
- Adjusts file permissions for the `node` user
- Installs dependencies: `pnpm install` in all packages
- Builds TypeScript SDK: `pnpm run build` in `open-api/typescript-sdk`
2. **Starts development servers** via VS Code tasks:
- `Immich API Server (Nest)` - API server with hot-reloading on port 2283
- `Immich Web Server (Vite)` - Web frontend with hot-reloading on port 3000
- Both servers watch for file changes and recompile automatically
3. **Configures port forwarding**:
- Web UI: http://localhost:3000 (opens automatically)
- API: http://localhost:2283
- Debug ports: 9230 (workers), 9231 (API)
:::info
The Dev Container setup replaces the `make dev` command from the traditional setup. All services start automatically when you open the container.
:::
### Accessing Services
Once running, you can access:
| Service | URL | Description |
| -------- | --------------------- | ---------------------------------------------------------------------------------------------- |
| Web UI | http://localhost:3000 | Main web interface |
| API | http://localhost:2283 | REST API endpoints (Not used directly, web UI will expose this over http://localhost:3000/api) |
| Database | localhost:5432 | PostgreSQL (username: `postgres`) (Not used directly) |
### Connecting Mobile Apps
To connect the mobile app to your Dev Container:
1. Find your machine's IP address
2. In the mobile app, use: `http://YOUR_IP:3000/api`
3. Ensure your firewall allows connections on port 2283
### Making Code Changes
- **Server code** (`/server`): Changes trigger automatic restart
- **Web code** (`/web`): Changes trigger hot module replacement
- **Database migrations**: Run `pnpm run sync:sql` in the server directory
- **API changes**: Regenerate TypeScript SDK with `make open-api`
## Testing
### Running Tests
The Dev Container supports multiple ways to run tests:
#### Using Make Commands (Recommended)
```bash
# Run tests for specific components
make test-server # Server unit tests
make test-web # Web unit tests
make test-e2e # End-to-end tests
make test-cli # CLI tests
# Run all tests
make test-all # Runs tests for all components
# Medium tests (integration tests)
make test-medium-dev # End-to-end tests
```
#### Using NPM Directly
```bash
# Server tests
cd /workspaces/immich/server
pnpm test # Run all tests
pnpm run test:watch # Watch mode
pnpm run test:cov # Coverage report
# Web tests
cd /workspaces/immich/web
pnpm test # Run all tests
pnpm run test:watch # Watch mode
# E2E tests
cd /workspaces/immich/e2e
pnpm run test # Run API tests
pnpm run test:web # Run web UI tests
```
### Code Quality Commands
```bash
# Linting
make lint-server # Lint server code
make lint-web # Lint web code
make lint-all # Lint all components
# Formatting
make format-server # Format server code
make format-web # Format web code
make format-all # Format all code
# Type checking
make check-server # Type check server
make check-web # Type check web
make check-all # Check all components
# Complete hygiene check
make hygiene-all # Runs lint, format, check, SQL sync, and audit
```
### Additional Make Commands
```bash
# Build commands
make build-server # Build server
make build-web # Build web app
make build-all # Build everything
# API generation
make open-api # Generate OpenAPI specs
make open-api-typescript # Generate TypeScript SDK
make open-api-dart # Generate Dart SDK
# Database
make sql # Sync database schema
# Dependencies
make install-server # Install server dependencies
make install-web # Install web dependencies
make install-all # Install all dependencies
```
### Debugging
The Dev Container is pre-configured for debugging:
1. **API Server Debugging**:
- Set breakpoints in VS Code
- Press `F5` or use "Run and Debug" panel
- Select "Attach to Server" configuration
- Debug port: 9231
2. **Worker Debugging**:
- Use "Attach to Workers" configuration
- Debug port: 9230
3. **Web Debugging**:
- Use browser DevTools
- VS Code debugger for Chrome/Edge extensions supported
## Troubleshooting
### Common Issues
#### Permission Errors
**Problem**: `EACCES` or permission denied errors
**Solution**:
- The Dev Container runs as the `node` user (UID 1000)
- If your host UID differs, you may see permission issues
- Try rebuilding the container: "Dev Containers: Rebuild Container"
#### Container Won't Start
**Problem**: Dev Container fails to start or build
**Solution**:
1. Check Docker is running: `docker ps`
2. Clean Docker resources: `docker system prune -a`
3. Check available disk space
4. Review Docker Desktop resource limits
#### Port Already in Use
**Problem**: "Port 3000/2283 is already in use"
**Solution**:
1. Check for conflicting services: `lsof -i :3000` (macOS/Linux)
2. Stop conflicting services or change port mappings
3. Restart Docker Desktop
#### Upload Location Not Set
**Problem**: Errors about missing UPLOAD_LOCATION
**Solution**:
1. Set the environment variable: `export UPLOAD_LOCATION=./Library`
2. Add to your shell profile for persistence
3. Restart your terminal and VS Code
#### Database Connection Failed
**Problem**: Cannot connect to PostgreSQL
**Solution**:
1. Ensure all containers are running: `docker ps`
2. Check logs: "Dev Containers: Show Container Log"
3. Verify database credentials match environment variables
### Getting Help
If you encounter issues:
1. Check container logs: View → Output → Select "Dev Containers"
2. Rebuild without cache: "Dev Containers: Rebuild Container Without Cache"
3. Review [common Docker issues](https://docs.docker.com/desktop/troubleshoot/)
4. Ask in [Discord](https://discord.immich.app) `#help-desk-support` channel
## Mobile Development
While the Dev Container focuses on server and web development, you can connect mobile apps for testing:
### Connecting iOS/Android Apps
1. **Ensure API is accessible**:
```bash
# Find your machine's IP
# macOS
ipconfig getifaddr en0
# Linux
hostname -I
# Windows (in WSL2)
ip addr show eth0
```
2. **Configure mobile app**:
- Server URL: `http://YOUR_IP:2283/api`
- Ensure firewall allows port 2283
3. **For full mobile development**, see the [mobile development guide](/docs/developer/setup) which covers:
- Flutter setup
- Running on simulators/devices
- Mobile-specific debugging
## Advanced Configuration
### Custom VS Code Extensions
Add extensions to `.devcontainer/devcontainer.json`:
```json
"customizations": {
"vscode": {
"extensions": [
"your.extension-id"
]
}
}
```
### Additional Services
To add services (e.g., Redis Commander), modify:
1. `/docker/docker-compose.dev.yml` - Add service definition
2. `/.devcontainer/server/container-compose-overrides.yml` - Add overrides if needed
### Resource Limits
Adjust Docker Desktop resources:
- **macOS/Windows**: Docker Desktop → Settings → Resources
- **Linux**: Modify Docker daemon configuration
Recommended minimums:
- CPU: 4 cores
- Memory: 8GB
- Disk: 20GB
## Next Steps
- Read the [architecture overview](/docs/developer/architecture)
- Learn about [database migrations](/docs/developer/database-migrations)
- Explore [API documentation](/docs/api)
- Join `#immich` on [Discord](https://discord.immich.app)


@@ -8,47 +8,34 @@ When contributing code through a pull request, please check the following:
## Web Checks
- [ ] `pnpm run lint` (linting via ESLint)
- [ ] `pnpm run format` (formatting via Prettier)
- [ ] `pnpm run check:svelte` (Type checking via SvelteKit)
- [ ] `pnpm run check:typescript` (check typescript)
- [ ] `pnpm test` (unit tests)
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check:svelte` (Type checking via SvelteKit)
- [ ] `npm run check:typescript` (check typescript)
- [ ] `npm test` (unit tests)
## Documentation
- [ ] `pnpm run format` (formatting via Prettier)
- [ ] `npm run format` (formatting via Prettier)
- [ ] Update the `_redirects` file if you have renamed a page or removed it from the documentation.
:::tip AIO
Run all web checks with `pnpm run check:all`
Run all web checks with `npm run check:all`
:::
## Server Checks
- [ ] `pnpm run lint` (linting via ESLint)
- [ ] `pnpm run format` (formatting via Prettier)
- [ ] `pnpm run check` (Type checking via `tsc`)
- [ ] `pnpm test` (unit tests)
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check` (Type checking via `tsc`)
- [ ] `npm test` (unit tests)
:::tip AIO
Run all server checks with `pnpm run check:all`
Run all server checks with `npm run check:all`
:::
:::info Auto Fix
You can use `pnpm run __:fix` to potentially correct some issues automatically for `pnpm run format` and `lint`.
:::
## Mobile Checks
The following commands must be executed from within the mobile app directory of the codebase.
- [ ] `make build` (auto-generate files using build_runner)
- [ ] `make analyze` (static analysis via Dart Analyzer and DCM)
- [ ] `make format` (formatting via Dart Formatter)
- [ ] `make test` (unit tests)
:::info Auto Fix
You can use `dart fix --apply` and `dcm fix lib` to potentially correct some issues automatically for `make analyze`.
You can use `npm run __:fix` to potentially correct some issues automatically for `npm run format` and `lint`.
:::
## OpenAPI


@@ -54,20 +54,20 @@ You can access the web from `http://your-machine-ip:3000` or `http://localhost:3
If you only want to do web development connected to an existing, remote backend, follow these steps:
1. Build the Immich SDK - `cd open-api/typescript-sdk && pnpm i && pnpm run build && cd -`
1. Build the Immich SDK - `cd open-api/typescript-sdk && npm i && npm run build && cd -`
2. Enter the web directory - `cd web/`
3. Install web dependencies - `pnpm i`
3. Install web dependencies - `npm i`
4. Start the web development server
```bash
IMMICH_SERVER_URL=https://demo.immich.app/ pnpm run dev
IMMICH_SERVER_URL=https://demo.immich.app/ npm run dev
```
If you're using PowerShell on Windows you may need to set the env var separately like so:
```powershell
$env:IMMICH_SERVER_URL = "https://demo.immich.app/"
pnpm run dev
npm run dev
```
#### `@immich/ui`
@@ -75,12 +75,11 @@ pnpm run dev
To see local changes to `@immich/ui` in Immich, do the following:
1. Install `@immich/ui` as a sibling to `immich/`, for example `/home/user/immich` and `/home/user/ui`
2. Build the `@immich/ui` project via `pnpm run build`
3. Uncomment the corresponding volume in web service of the `docker/docker-compose.dev.yaml` file (`../../ui:/usr/ui`)
4. Uncomment the corresponding alias in the `web/vite.config.js` file (`'@immich/ui': path.resolve(\_\_dirname, '../../ui')`)
5. Uncomment the import statement in `web/src/app.css` file `@import '/usr/ui/dist/theme/default.css';` and comment out `@import '@immich/ui/theme/default.css';`
6. Start up the stack via `make dev`
7. After making changes in `@immich/ui`, rebuild it (`pnpm run build`)
1. Build the `@immich/ui` project via `npm run build`
1. Uncomment the corresponding volume in web service of the `docker/docker-compose.dev.yaml` file (`../../ui:/usr/ui`)
1. Uncomment the corresponding alias in the `web/vite.config.js` file (`'@immich/ui': path.resolve(\_\_dirname, '../../ui')`)
1. Start up the stack via `make dev`
1. After making changes in `@immich/ui`, rebuild it (`npm run build`)
### Mobile app
@@ -115,72 +114,32 @@ Note: Activating the license is not required.
### VSCode
Install `Flutter`, `DCM`, `Prettier`, `ESLint` and `Svelte` extensions. These extensions are listed in the `extensions.json` file under `.vscode/` and should appear as workspace recommendations.
Install `Flutter`, `DCM`, `Prettier`, `ESLint` and `Svelte` extensions.
Here are the settings we use; they should be active as workspace settings (`settings.json`):
In User `settings.json` (`cmd + shift + p`, then search for `Open User Settings JSON`), add the following:
```json title="settings.json"
{
"[css]": {
"editor.formatOnSave": true,
"[javascript][typescript][css]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2,
"editor.formatOnSave": true
},
"[svelte]": {
"editor.defaultFormatter": "svelte.svelte-vscode",
"editor.tabSize": 2
},
"svelte.enable-ts-plugin": true,
"eslint.validate": ["javascript", "svelte"],
"[dart]": {
"editor.defaultFormatter": "Dart-Code.dart-code",
"editor.formatOnSave": true,
"editor.selectionHighlight": false,
"editor.suggest.snippetsPreventQuickSuggestions": false,
"editor.suggestSelection": "first",
"editor.tabCompletion": "onlySnippets",
"editor.wordBasedSuggestions": "off"
},
"[javascript]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[svelte]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "svelte.svelte-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"[typescript]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit",
"source.removeUnusedImports": "explicit"
},
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.tabSize": 2
},
"cSpell.words": ["immich"],
"editor.formatOnSave": true,
"eslint.validate": ["javascript", "svelte"],
"explorer.fileNesting.enabled": true,
"explorer.fileNesting.patterns": {
"*.dart": "${capture}.g.dart,${capture}.gr.dart,${capture}.drift.dart",
"*.ts": "${capture}.spec.ts,${capture}.mock.ts"
},
"svelte.enable-ts-plugin": true,
"typescript.preferences.importModuleSpecifier": "non-relative"
"editor.wordBasedSuggestions": "off",
"editor.defaultFormatter": "Dart-Code.dart-code"
}
}
```


@@ -4,8 +4,8 @@
### Unit tests
Unit tests are run by calling `pnpm run test` from the `server/` directory.
You need to run `pnpm install` (in `server/`) _once_ beforehand.
Unit tests are run by calling `npm run test` from the `server/` directory.
You need to run `npm install` (in `server/`) _once_ beforehand.
### End to end tests
@@ -17,14 +17,14 @@ make e2e
Before you can run the tests, you need to run the following commands _once_:
- `pnpm install` (in `e2e/`)
- `npm install` (in `e2e/`)
- `make open-api` (in the project root `/`)
Once the test environment is running, the e2e tests can be run via:
```bash
cd e2e/
pnpm test
npm test
```
The tests check various things including:


@@ -1,19 +0,0 @@
# Chromecast support
Immich supports Google's Cast protocol so that photos and videos can be cast to devices such as a Chromecast and a Nest Hub. This feature is considered experimental and has several important limitations listed below. Currently, this feature is only supported by the web client; support on Android and iOS is planned for the future.
## Enable Google Cast Support
Google Cast support is disabled by default. The web UI uses Google-provided scripts and must retrieve them from Google servers when the page loads. This is a privacy concern for some and is thus opt-in.
You can enable Google Cast support through `Account Settings > Features > Cast > Google Cast`
<img src={require('./img/gcast-enable.webp').default} width="70%" title='Enable Google Cast Support' />
## Limitations
To use casting with Immich, there are a few prerequisites:
1. Your instance must be accessed via an HTTPS connection in order for the casting menu to show.
2. Your instance must be publicly accessible via HTTPS and a DNS record for the server must be accessible via Google's DNS servers (`8.8.8.8` and `8.8.4.4`)
3. Videos must be in a format that is compatible with Google Cast. For more info, check out [Google's documentation](https://developers.google.com/cast/docs/media)


@@ -90,22 +90,19 @@ Usage: immich upload [paths...] [options]
Upload assets
Arguments:
paths One or more paths to assets to be uploaded
paths One or more paths to assets to be uploaded
Options:
-r, --recursive Recursive (default: false, env: IMMICH_RECURSIVE)
-i, --ignore <pattern> Pattern to ignore (env: IMMICH_IGNORE_PATHS)
-h, --skip-hash Don't hash files before upload (default: false, env: IMMICH_SKIP_HASH)
-H, --include-hidden Include hidden folders (default: false, env: IMMICH_INCLUDE_HIDDEN)
-a, --album Automatically create albums based on folder name (default: false, env: IMMICH_AUTO_CREATE_ALBUM)
-A, --album-name <name> Add all assets to specified album (env: IMMICH_ALBUM_NAME)
-n, --dry-run Don't perform any actions, just show what will be done (default: false, env: IMMICH_DRY_RUN)
-c, --concurrency <number> Number of assets to upload at the same time (default: 4, env: IMMICH_UPLOAD_CONCURRENCY)
-j, --json-output Output detailed information in json format (default: false, env: IMMICH_JSON_OUTPUT)
--delete Delete local assets after upload (env: IMMICH_DELETE_ASSETS)
--no-progress Hide progress bars (env: IMMICH_PROGRESS_BAR)
--watch Watch for changes and upload automatically (default: false, env: IMMICH_WATCH_CHANGES)
--help display help for command
-r, --recursive Recursive (default: false, env: IMMICH_RECURSIVE)
-i, --ignore [paths...] Paths to ignore (default: [], env: IMMICH_IGNORE_PATHS)
-h, --skip-hash Don't hash files before upload (default: false, env: IMMICH_SKIP_HASH)
-H, --include-hidden Include hidden folders (default: false, env: IMMICH_INCLUDE_HIDDEN)
-a, --album Automatically create albums based on folder name (default: false, env: IMMICH_AUTO_CREATE_ALBUM)
-A, --album-name <name> Add all assets to specified album (env: IMMICH_ALBUM_NAME)
-n, --dry-run Don't perform any actions, just show what will be done (default: false, env: IMMICH_DRY_RUN)
-c, --concurrency <number> Number of assets to upload at the same time (default: 4, env: IMMICH_UPLOAD_CONCURRENCY)
--delete Delete local assets after upload (env: IMMICH_DELETE_ASSETS)
--help display help for command
```
</details>
@@ -175,16 +172,6 @@ By default, hidden files are skipped. If you want to include hidden files, use t
immich upload --include-hidden --recursive directory/
```
You can use the `--json-output` option to print JSON output containing
three keys: `newFiles`, `duplicates` and `newAssets`. Due to some logging
output, you will need to strip the first three lines of output to get valid JSON.
For example, to get a list of files that would be uploaded for further
processing:
```bash
immich upload --dry-run . | tail -n +4 | jq .newFiles[]
```
### Obtain the API Key
The API key can be obtained in the user setting panel on the web interface.


@@ -2,7 +2,7 @@
Folder view provides an additional view besides the timeline that is similar to a file explorer. It allows you to navigate through the folders and files in the library. This feature is handy for a highly curated and customized external library or a nicely configured storage template.
You can enable this feature under [`Account Settings > Features > Folders`](https://my.immich.app/user-settings?isOpen=feature+folders)
You can enable this feature under [`Account Settings > Features > Folder View`](https://my.immich.app/user-settings?isOpen=feature+folders)
## Enable folder view


@@ -121,6 +121,6 @@ Once this is done, you can continue to step 3 of "Basic Setup".
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.transcoding.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
[jellyfin-lp]: https://jellyfin.org/docs/general/post-install/transcoding/hardware-acceleration/intel#low-power-encoding
[jellyfin-kernel-bug]: https://jellyfin.org/docs/general/post-install/transcoding/hardware-acceleration/intel#known-issues-and-limitations-on-linux
[jellyfin-lp]: https://jellyfin.org/docs/general/administration/hardware-acceleration/intel/#configure-and-verify-lp-mode-on-linux
[jellyfin-kernel-bug]: https://jellyfin.org/docs/general/administration/hardware-acceleration/intel/#known-issues-and-limitations
[libmali-rockchip]: https://github.com/tsukumijima/libmali-rockchip/releases

Binary image file changed (Before: 19 KiB → After: 11 KiB); contents not shown.


@@ -33,7 +33,7 @@ Sometimes, an external library will not scan correctly. This can happen if Immic
- Are the permissions set correctly?
- Make sure you are using forward slashes (`/`) and not backward slashes.
To validate that Immich can reach your external library, start a shell inside the container. Run `docker exec -it immich_server bash` to get a bash shell. If your import path is `/mnt/photos`, check it with `ls /mnt/photos`. If you are using a dedicated microservices container, make sure to add the same mount point and check for availability within the microservices container as well.
To validate that Immich can reach your external library, start a shell inside the container. Run `docker exec -it immich_server bash` to get a bash shell. If your import path is `/data/import/photos`, check it with `ls /data/import/photos`. Do the same check in any microservices containers.
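In practice the check can be done in one line from the host (container name and path as in the examples above):

```bash
# Confirm the import path is visible (and readable) inside the container.
docker exec -it immich_server ls -la /mnt/photos
# "No such file or directory" or a permission error points to a missing or
# misconfigured bind mount in docker-compose.yml.
```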
### Exclusion Patterns
@@ -56,9 +56,9 @@ Internally, Immich uses the [glob](https://www.npmjs.com/package/glob) package t
### Automatic watching (EXPERIMENTAL)
This feature is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan.
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan.
If your photos are on a network drive, automatic file watching likely won't work. In that case, you will have to rely on a [periodic library refresh](#set-custom-scan-interval) to pull in your changes.
If your photos are on a network drive, automatic file watching likely won't work. In that case, you will have to rely on a periodic library refresh to pull in your changes.
#### Troubleshooting
@@ -72,9 +72,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic scan job that is scheduled to run once a day. Its schedule is configurable, see [Set Custom Scan Interval](#set-custom-scan-interval).
This job also cleans up any libraries stuck in deletion. It is possible to trigger the cleanup by clicking "Scan all libraries" in the library management page.
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion. It is possible to trigger the cleanup by clicking "Scan all libraries" in the library managment page.
## Usage
@@ -93,7 +91,7 @@ The `immich-server` container will need access to the gallery. Modify your docke
```diff title="docker-compose.yml"
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/data
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
@@ -114,15 +112,12 @@ _Remember to run `docker compose up -d` to register the changes. Make sure you c
These actions must be performed by the Immich administrator.
- Click on your avatar in the upper right corner
- Click on Administration -> External Libraries
- Click on Create an external library…
- Click on Administration -> Libraries
- Click on Create External Library
- Select which user owns the library; this cannot be changed later
- Enter `/mnt/media/christmas-trip` then click Add
- Click on Save
- Click the drop-down menu on the newly created library
- Click on Scan
- Click the drop-down menu on the newly created library
- Click on Rename Library and rename it to "Christmas Trip"
NOTE: We have to use the `/mnt/media/christmas-trip` path and not the `/mnt/nas/christmas-trip` path since all paths have to be what the Docker containers see.
@@ -161,7 +156,9 @@ Within seconds, the assets from the old-pics and videos folders should show up i
Folder view provides an additional view besides the timeline that is similar to a file explorer. It allows you to navigate through the folders and files in the library. This feature is handy for a highly curated and customized external library or a nicely configured storage template.
You can enable this feature under [`Account Settings > Features > Folders`](https://my.immich.app/user-settings?isOpen=feature+folders)
You can enable this feature under [`Account Settings > Features > Folder View`](https://my.immich.app/user-settings?isOpen=feature+folders)
The UI is currently only available for the web; mobile will come in a subsequent release.
<img src={require('./img/folder-view-1.webp').default} width="100%" title='Folder-view' />
@@ -171,7 +168,7 @@ You can enable this feature under [`Account Settings > Features > Folders`](http
Only an admin can do this.
:::
You can define a custom interval for the trigger external library rescan under Administration -> Settings -> External Library.
You can define a custom interval for the trigger external library rescan under Administration -> Settings -> Library.
You can set the scanning interval using the preset or cron format. For more information you can refer to [Crontab Guru](https://crontab.guru/).
<img src={require('./img/library-custom-scan-interval.webp').default} width="75%" title='Set custom scan interval for external library' />
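For reference, the interval accepts standard five-field cron expressions; a few common examples (verifiable on Crontab Guru):

```bash
# minute hour day-of-month month day-of-week
# "0 2 * * *"    -> every day at 02:00
# "0 */6 * * *"  -> every 6 hours
# "0 3 * * 1"    -> every Monday at 03:00
```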


@@ -42,7 +42,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The GPU must have compute capability 5.2 or greater.
- The server must have the official NVIDIA driver installed.
- The installed driver must be >= 545 (it must support CUDA 12.3).
- The installed driver must be >= 535 (it must support CUDA 12.2).
- On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.
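A common smoke test (assuming the NVIDIA driver and, on Linux, the Container Toolkit are installed) is:

```bash
# The driver version and the maximum supported CUDA version appear in the
# header; the driver must be new enough for the CUDA version your image targets.
nvidia-smi

# Verify Docker can pass the GPU through to a container.
docker run --rm --gpus all ubuntu nvidia-smi
```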
#### ROCm


@@ -88,9 +88,9 @@ It will only reflect files you add.
:::
If the same asset is in more than one album it will only sync to the first album it's in; after that it won't sync again even if the user clicks sync albums manually.
To overcome this limitation, the files must be removed from the ignore list by
To overcome this limitation, the files must be removed from the blacklist by
App settings -> Advanced -> Duplicate Assets -> Clear
:::info
Cleaning duplicate assets from the list will cause all the previously uploaded duplicate files to be re-uploaded; the files will not actually be uploaded and will be rejected on the server side (due to duplication), but they will be synchronized to the album and added to the ignore list again at the end of the synchronization.
Cleaning duplicate assets from the list will cause all the previously uploaded duplicate files to be re-uploaded; the files will not actually be uploaded and will be rejected on the server side (due to duplication), but they will be synchronized to the album and added to the black list again at the end of the synchronization.
:::


@@ -66,7 +66,7 @@ The provided file is just a starting point. There are a ton of ways to configure
After bringing down the containers with `docker compose down` and back up with `docker compose up -d`, a Prometheus instance will now collect metrics from the immich server and microservices containers. Note that we didn't need to expose any new ports for these containers - the communication is handled in the internal Docker network.
:::note
To see exactly what metrics are made available, you can additionally add `8081:8081` (API metrics) and `8082:8082` (microservices metrics) to the immich_server container's ports.
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8082` to the microservices container's ports.
Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
To configure these ports see [`IMMICH_API_METRICS_PORT` & `IMMICH_MICROSERVICES_METRICS_PORT`](/docs/install/environment-variables/#general).
:::
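With those ports published, the raw exposition output can be fetched directly; these are the same samples Prometheus scrapes:

```bash
curl http://localhost:8081/metrics   # API metrics
curl http://localhost:8082/metrics   # microservices metrics
```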


@@ -5,7 +5,7 @@ import TabItem from '@theme/TabItem';
Immich uses Postgres as its search database for both metadata and contextual CLIP search.
Contextual CLIP search is powered by the [VectorChord](https://github.com/tensorchord/VectorChord) extension, utilizing machine learning models like [CLIP](https://openai.com/research/clip) to provide relevant search results. This allows for freeform searches without requiring specific keywords in the image or video metadata.
Contextual CLIP search is powered by the [pgvecto.rs](https://github.com/tensorchord/pgvecto.rs) extension, utilizing machine learning models like [CLIP](https://openai.com/research/clip) to provide relevant search results. This allows for freeform searches without requiring specific keywords in the image or video metadata.
## Advanced Search Filters
@@ -92,7 +92,7 @@ Memory and execution time estimates were obtained without acceleration on a 7800
**Execution Time (ms)**: After warming up the model with one pass, the mean execution time of 100 passes with the same input.
**Memory (MiB)**: The peak RSS usage of the process after performing the above timing benchmark. Does not include image decoding, concurrent processing, the web server, etc., which are relatively constant factors.
**Memory (MiB)**: The peak RSS usage of the process afer performing the above timing benchmark. Does not include image decoding, concurrent processing, the web server, etc., which are relatively constant factors.
**Recall (%)**: Evaluated on Crossmodal-3600, the average of the recall@1, recall@5 and recall@10 results for zeroshot image retrieval. Chinese (Simplified), English, French, German, Italian, Japanese, Korean, Polish, Russian, Spanish and Turkish are additionally tested on XTD-10. Chinese (Simplified) and English are additionally tested on Flickr30k. The recall metrics are the average across all tested datasets.


@@ -16,7 +16,7 @@ For the full list, refer to the [Immich source code](https://github.com/immich-a
| `HEIC` | `.heic` | :white_check_mark: | |
| `HEIF` | `.heif` | :white_check_mark: | |
| `JPEG 2000` | `.jp2` | :white_check_mark: | |
| `JPEG` | `.jpeg` `.jpg` `.jpe` `.insp` | :white_check_mark: | |
| `JPEG` | `.webp` `.jpg` `.jpe` `.insp` | :white_check_mark: | |
| `JPEG XL` | `.jxl` | :white_check_mark: | |
| `PNG` | `.png` | :white_check_mark: | |
| `PSD` | `.psd` | :white_check_mark: | Adobe Photoshop |


@@ -1,6 +1,6 @@
# Tags
Immich supports hierarchical tags, with the ability to read existing tags from the XMP `TagsList` field and IPTC `Keywords` field. Any changes to tags made through Immich are also written back to a [sidecar](/docs/features/xmp-sidecars) file. You can re-run the metadata extraction jobs for all assets to import your existing tags.
Immich supports hierarchical tags, with the ability to read existing tags from the `TagList` and `Keywords` EXIF properties. Any changes to tags made through Immich are also written back to a [sidecar](/docs/features/xmp-sidecars) file. You can re-run the metadata extraction jobs for all assets to import your existing tags.
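As a sketch, `exiftool` can inspect or pre-populate the relevant fields before import (tag names follow the prose above; the file name is an example):

```bash
# Show the tag fields Immich reads.
exiftool -XMP-digiKam:TagsList -IPTC:Keywords photo.jpg

# Add a hierarchical tag (levels separated by "/") so it is imported as nested tags.
exiftool -XMP-digiKam:TagsList+="Travel/2023/Italy" photo.jpg
```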
## Enable tags feature


@@ -1,68 +1,13 @@
# XMP Sidecars
Immich supports XMP sidecar files — external `.xmp` files that store metadata for an image or video in XML format. During the metadata extraction job Immich will read & import metadata from `.xmp` files, and during the Sidecar Write job it will _write_ metadata back to `.xmp`.
Immich can ingest XMP sidecars on file upload (via the CLI) as well as detect new sidecars that are placed in the filesystem for existing images.
:::tip
Tools like Lightroom, Darktable, digiKam and other applications can also be configured to write changes to `.xmp` files, in order to avoid modifying the original file.
:::
<img src={require('./img/xmp-sidecars.webp').default} title='XMP sidecars' />
## Metadata Fields
XMP sidecars are external XML files that contain metadata related to media files. Many applications read and write these files either exclusively or in addition to the metadata written to image files. They can be a powerful tool for editing and storing metadata of a media file without modifying the media file itself. When Immich receives or detects an XMP sidecar for a media file, it will attempt to extract the metadata from both the sidecar as well as the media file. It will prioritize the metadata for fields in the sidecar but will fall back and use the metadata in the media file if necessary.
Immich does not support _all_ metadata fields. Below is a table showing what fields Immich can _read_ and _write_. It's important to note that writes do not replace the entire file contents, but are merged together with any existing fields.
When importing files via the CLI bulk uploader or parsing photo metadata for external libraries, Immich will automatically detect XMP sidecar files as files that exist next to the original media file. Immich will look for files that have the same name as the photo, but with the `.xmp` file extension; the name can either include the photo's file extension or omit it. For example, for a photo named `PXL_20230401_203352928.MP.jpg`, Immich will look for an XMP file named either `PXL_20230401_203352928.MP.jpg.xmp` or `PXL_20230401_203352928.MP.xmp`. If both `PXL_20230401_203352928.MP.jpg.xmp` and `PXL_20230401_203352928.MP.xmp` are present, Immich will prefer `PXL_20230401_203352928.MP.jpg.xmp`.
:::info
Immich automatically queues a Sidecar Write job after editing the description, rating, or updating tags.
:::
There are 2 administrator jobs associated with sidecar files: `SYNC` and `DISCOVER`. The sync job will re-scan all media with existing sidecar files and queue them for a metadata refresh. This is a great use case when third-party applications are used to modify the metadata of media. The discover job will attempt to scan the filesystem for new sidecar files for all media that does not currently have a sidecar file associated with it.
| Metadata | Immich writes to XMP | Immich reads from XMP |
| --------------- | ------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Description** | `dc:description`, `tiff:ImageDescription` | `dc:description`, `tiff:ImageDescription` |
| **Rating** | `xmp:Rating` | `xmp:Rating` |
| **DateTime** | `exif:DateTimeOriginal`, `photoshop:DateCreated` | In prioritized order:<br/>`exif:SubSecDateTimeOriginal`<br/>`exif:DateTimeOriginal`<br/>`xmp:SubSecCreateDate`<br/>`xmp:CreateDate`<br/>`xmp:CreationDate`<br/>`xmp:MediaCreateDate`<br/>`xmp:SubSecMediaCreateDate`<br/>`xmp:DateTimeCreated` |
| **Location** | `exif:GPSLatitude`, `exif:GPSLongitude` | `exif:GPSLatitude`, `exif:GPSLongitude` |
| **Tags** | `digiKam:TagsList` | In prioritized order: <br/>`digiKam:TagsList`<br/>`lr:HierarchicalSubject`<br/>`IPTC:Keywords` |
:::note
All other fields (e.g. `Creator`, `Source`, IPTC, Lightroom edits) remain in the `.xmp` file and are **not searchable** in Immich.
:::
## File Naming Rules
A sidecar must share the base name of the media file:
- `IMG_0001.jpg.xmp` ← preferred
- `IMG_0001.xmp` ← fallback
- `myphoto_meta.xmp` ← not recognized
If both `.jpg.xmp` and `.xmp` are present, Immich uses the **`.jpg.xmp`** file.
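For illustration, a minimal sidecar following the preferred naming could be created like this (the field set is a sketch; real sidecars from Lightroom, digiKam, etc. carry many more namespaces):

```bash
# Create a bare-bones sidecar next to IMG_0001.jpg with just a rating.
cat > IMG_0001.jpg.xmp <<'EOF'
<x:xmpmeta xmlns:x="adobe:ns:meta/">
 <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
  <rdf:Description xmlns:xmp="http://ns.adobe.com/xap/1.0/" xmp:Rating="5"/>
 </rdf:RDF>
</x:xmpmeta>
EOF
```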
## CLI Support
1. **Detect** Immich looks for a `.xmp` file placed next to each media file during upload.
2. **Copy** Both the media and the sidecar file are copied into Immich's internal library folder.
The sidecar is renamed to match the internal filename template, e.g.:
`upload/library/<user>/YYYY/YYYY-MM-DD/IMG_0001.jpg`
`upload/library/<user>/YYYY/YYYY-MM-DD/IMG_0001.jpg.xmp`
3. **Extract** Selected metadata (title, description, date, rating, tags) is parsed from the sidecar and saved to the database.
4. **Write-back** If you later update tags, rating, or description in the web UI, Immich will update **both** the database _and_ the copied `.xmp` file to stay in sync.
## External Library (Mounted Folder) Support
1. **Detect** The `DISCOVER` job automatically associates `.xmp` files that sit next to existing media files in your mounted folder. No files are moved or renamed.
2. **Extract** Immich reads and saves the same metadata fields from the sidecar to the database.
3. **Write-back** If Immich has **write access** to the mount, any future metadata edits (e.g., rating or tags) are also written back to the original `.xmp` file on disk.
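Before running `DISCOVER`, a quick shell sketch (extensions and mount path are examples) can list media that has no sidecar under either accepted name:

```bash
# Report media files that have neither NAME.EXT.xmp nor NAME.xmp beside them.
find /mnt/media -type f \( -iname '*.jpg' -o -iname '*.heic' -o -iname '*.mp4' \) |
while read -r f; do
  base="${f%.*}"
  [ -e "$f.xmp" ] || [ -e "$base.xmp" ] || echo "no sidecar: $f"
done
```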
:::danger
If the mount is **read-only**, Immich cannot update either the sidecar **or** the database; **metadata edits will silently fail** with no warning. See issue [#10538](https://github.com/immich-app/immich/issues/10538) for more details.
:::
## Admin Jobs
Immich provides two admin jobs for managing sidecars:
| Job | What it does |
| ---------- | ------------------------------------------------------------------------------------------------- |
| `DISCOVER` | Finds new `.xmp` files next to media that don't already have one linked |
| `SYNC` | Re-reads existing `.xmp` files and refreshes metadata in the database (e.g. after external edits) |
![Sidecar Admin Jobs](./img/sidecar-jobs.webp)
<img src={require('./img/sidecar-jobs.webp').default} title='Sidecar Administrator Jobs' />


@@ -27,11 +27,11 @@ After defining the locations of these files, we will edit the `docker-compose.ym
services:
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/data
+ - ${THUMB_LOCATION}:/data/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/data/encoded-video
+ - ${PROFILE_LOCATION}:/data/profile
+ - ${BACKUP_LOCATION}:/data/backups
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
+ - ${BACKUP_LOCATION}:/usr/src/app/upload/backups
- /etc/localtime:/etc/localtime:ro
```
@@ -44,7 +44,7 @@ docker compose up -d
:::note
Because of the underlying properties of docker bind mounts, it is not recommended to mount the `upload/` and `library/` folders as separate bind mounts if they are on the same device.
For this reason, we mount the HDD or the network storage (NAS) to `/data` and then mount the folders we want to access under that folder.
For this reason, we mount the HDD or the network storage (NAS) to `/usr/src/app/upload` and then mount the folders we want to access under that folder.
The `thumbs/` folder contains both the small thumbnails displayed in the timeline and the larger previews shown when clicking into an image. These cannot be separated.
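The variables referenced in the compose file would then be defined in `.env`; the paths below are purely illustrative:

```bash
# .env — example values; point each variable at the disk you want that data on.
UPLOAD_LOCATION=/mnt/hdd/immich/library
THUMB_LOCATION=/mnt/ssd/immich/thumbs
ENCODED_VIDEO_LOCATION=/mnt/hdd/immich/encoded-video
PROFILE_LOCATION=/mnt/ssd/immich/profile
BACKUP_LOCATION=/mnt/backup/immich
```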


@@ -14,14 +14,14 @@ online generators you can use.
2. Paste the link to your JSON style in either the **Light Style** or **Dark Style**. (You can add different styles which will help make the map style more appropriate depending on whether you set **Immich** to Light or Dark mode.)
3. Save your selections. Reload the map, and enjoy your custom map style!
## Use MapTiler to build a custom style
## Use Maptiler to build a custom style
Customizing the map style can be done easily using MapTiler, if you do not want to write an entire JSON document by hand.
Customizing the map style can be done easily using Maptiler, if you do not want to write an entire JSON document by hand.
1. Create a free account at https://cloud.maptiler.com
2. Once logged in, you can either create a brand new map by clicking on **New Map**, selecting a starter map, and then clicking **Customize**, OR by selecting a **Standard Map** and customizing it from there.
3. The **editor** interface is self-explanatory. You can change colors, remove visible layers, or add optional layers (e.g., administrative, topo, hydro, etc.) in the composer.
4. Once you have your map composed, click on **Save** at the top right. Give it a unique name to save it to your account.
5. Next, **Publish** your style using the **Publish** button at the top right. This will deploy it to production, which means it is able to be exposed over the Internet. MapTiler will present an interactive side-by-side map with the original and your changes prior to publication.<br/>![MapTiler Publication Settings](img/immich_map_styles_publish.webp)
6. MapTiler will warn you that changing the map will change it across all apps using the map. Since no apps are using the map yet, this is okay.
7. Clicking on the name of your new map at the top left will bring you to the item's **details** page. From here, copy the link to the JSON style under **Use vector style**. This link will automatically contain your personal API key to MapTiler.
5. Next, **Publish** your style using the **Publish** button at the top right. This will deploy it to production, which means it is able to be exposed over the Internet. Maptiler will present an interactive side-by-side map with the original and your changes prior to publication.<br/>![Maptiler Publication Settings](img/immich_map_styles_publish.webp)
6. Maptiler will warn you that changing the map will change it across all apps using the map. Since no apps are using the map yet, this is okay.
7. Clicking on the name of your new map at the top left will bring you to the item's **details** page. From here, copy the link to the JSON style under **Use vector style**. This link will automatically contain your personal API key to Maptiler.


@@ -1,7 +1,7 @@
# Database Queries
:::danger
Keep in mind that mucking around in the database might set the Moon on fire. Avoid modifying the database directly when possible, and always have current backups.
Keep in mind that mucking around in the database might set the moon on fire. Avoid modifying the database directly when possible, and always have current backups.
:::
:::tip
@@ -12,131 +12,105 @@ Run `docker exec -it immich_postgres psql --dbname=<DB_DATABASE_NAME> --username
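As a convenience, the same `psql` invocation can run a one-off query non-interactively with `-c`; the database name, username, and container name below are the common defaults and may differ in your `.env`:

```bash
# Run a single query without opening an interactive psql shell.
docker exec -it immich_postgres \
  psql --dbname=immich --username=postgres \
  -c 'SELECT COUNT(*) FROM "asset";'
```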
## Assets
### Name
:::note
The `"originalFileName"` column is the name of the file at time of upload, including the extension.
:::
```sql title="Find by original filename"
SELECT * FROM "asset" WHERE "originalFileName" = 'PXL_20230903_232542848.jpg';
SELECT * FROM "asset" WHERE "originalFileName" LIKE 'PXL_%'; -- all files starting with PXL_
SELECT * FROM "asset" WHERE "originalFileName" LIKE '%_2023_%'; -- all files with _2023_ in the middle
SELECT * FROM "assets" WHERE "originalFileName" = 'PXL_20230903_232542848.jpg';
SELECT * FROM "assets" WHERE "originalFileName" LIKE 'PXL_%'; -- all files starting with PXL_
SELECT * FROM "assets" WHERE "originalFileName" LIKE '%_2023_%'; -- all files with _2023_ in the middle
```
```sql title="Find by path"
SELECT * FROM "asset" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_2023.jpg';
SELECT * FROM "asset" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_2023.jpg';
SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
```
### ID
```sql title="Find by ID"
SELECT * FROM "asset" WHERE "id" = '9f94e60f-65b6-47b7-ae44-a4df7b57f0e9';
SELECT * FROM "assets" WHERE "id" = '9f94e60f-65b6-47b7-ae44-a4df7b57f0e9';
```
```sql title="Find by partial ID"
SELECT * FROM "asset" WHERE "id"::text LIKE '%ab431d3a%';
SELECT * FROM "assets" WHERE "id"::text LIKE '%ab431d3a%';
```
### Checksum
:::note
You can calculate the checksum for a particular file by using the command `sha1sum <filename>`.
:::
```sql title="Find by checksum (SHA-1)"
SELECT encode("checksum", 'hex') FROM "asset";
SELECT * FROM "asset" WHERE "checksum" = decode('69de19c87658c4c15d9cacb9967b8e033bf74dd1', 'hex');
SELECT * FROM "asset" WHERE "checksum" = '\x69de19c87658c4c15d9cacb9967b8e033bf74dd1'; -- alternate notation
SELECT encode("checksum", 'hex') FROM "assets";
SELECT * FROM "assets" WHERE "checksum" = decode('69de19c87658c4c15d9cacb9967b8e033bf74dd1', 'hex');
SELECT * FROM "assets" WHERE "checksum" = '\x69de19c87658c4c15d9cacb9967b8e033bf74dd1'; -- alternate notation
```
```sql title="Find duplicate assets with identical checksum (SHA-1) (excluding trashed files)"
SELECT T1."checksum", array_agg(T2."id") ids FROM "asset" T1
INNER JOIN "asset" T2 ON T1."checksum" = T2."checksum" AND T1."id" != T2."id" AND T2."deletedAt" IS NULL
SELECT T1."checksum", array_agg(T2."id") ids FROM "assets" T1
INNER JOIN "assets" T2 ON T1."checksum" = T2."checksum" AND T1."id" != T2."id" AND T2."deletedAt" IS NULL
WHERE T1."deletedAt" IS NULL GROUP BY T1."checksum";
```
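Putting the note above together with the checksum queries, a small shell sketch (assuming the same container and credentials as above; the filename is an example) can compute a local file's SHA-1 and look it up directly:

```bash
# Hash a local file and search for it in the database in one step.
sum=$(sha1sum PXL_20230903_232542848.jpg | cut -d' ' -f1)
docker exec immich_postgres psql --dbname=immich --username=postgres \
  -c "SELECT \"id\", \"originalFileName\" FROM \"asset\" WHERE \"checksum\" = decode('$sum', 'hex');"
```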
### Metadata
```sql title="Live photos"
SELECT * FROM "asset" WHERE "livePhotoVideoId" IS NOT NULL;
SELECT * FROM "assets" WHERE "livePhotoVideoId" IS NOT NULL;
```
```sql title="By description"
SELECT "asset".*, "asset_exif"."description" FROM "asset_exif"
JOIN "asset" ON "asset"."id" = "asset_exif"."assetId"
WHERE TRIM("asset_exif"."description") <> ''; -- all files with a description
SELECT "asset".*, "asset_exif"."description" FROM "asset_exif"
JOIN "asset" ON "asset"."id" = "asset_exif"."assetId"
WHERE "asset_exif"."description" ILIKE '%string to match%'; -- search by string
SELECT "assets".*, "exif"."description" FROM "exif"
JOIN "assets" ON "assets"."id" = "exif"."assetId"
WHERE TRIM("exif"."description") <> ''; -- all files with a description
SELECT "assets".*, "exif"."description" FROM "exif"
JOIN "assets" ON "assets"."id" = "exif"."assetId"
WHERE "exif"."description" ILIKE '%string to match%'; -- search by string
```
```sql title="Without metadata"
SELECT "asset".* FROM "asset_exif"
LEFT JOIN "asset" ON "asset"."id" = "asset_exif"."assetId"
WHERE "asset_exif"."assetId" IS NULL;
SELECT "assets".* FROM "exif"
LEFT JOIN "assets" ON "assets"."id" = "exif"."assetId"
WHERE "exif"."assetId" IS NULL;
```
```sql title="size < 100,000 bytes, smallest to largest"
SELECT * FROM "asset"
JOIN "asset_exif" ON "asset"."id" = "asset_exif"."assetId"
WHERE "asset_exif"."fileSizeInByte" < 100000
ORDER BY "asset_exif"."fileSizeInByte" ASC;
SELECT * FROM "assets"
JOIN "exif" ON "assets"."id" = "exif"."assetId"
WHERE "exif"."fileSizeInByte" < 100000
ORDER BY "exif"."fileSizeInByte" ASC;
```
### Type
```sql title="Without thumbnails"
SELECT * FROM "assets" WHERE "assets"."previewPath" IS NULL OR "assets"."thumbnailPath" IS NULL;
```
```sql title="By type"
SELECT * FROM "asset" WHERE "asset"."type" = 'VIDEO';
SELECT * FROM "asset" WHERE "asset"."type" = 'IMAGE';
SELECT * FROM "assets" WHERE "assets"."type" = 'VIDEO';
SELECT * FROM "assets" WHERE "assets"."type" = 'IMAGE';
```
```sql title="Count by type"
SELECT "asset"."type", COUNT(*) FROM "asset" GROUP BY "asset"."type";
SELECT "assets"."type", COUNT(*) FROM "assets" GROUP BY "assets"."type";
```
```sql title="Count by type (per user)"
SELECT "user"."email", "asset"."type", COUNT(*) FROM "asset"
JOIN "user" ON "asset"."ownerId" = "user"."id"
GROUP BY "asset"."type", "user"."email" ORDER BY "user"."email";
SELECT "users"."email", "assets"."type", COUNT(*) FROM "assets"
JOIN "users" ON "assets"."ownerId" = "users"."id"
GROUP BY "assets"."type", "users"."email" ORDER BY "users"."email";
```
## Tags
```sql title="Count by tag"
SELECT "t"."value" AS "tag_name", COUNT(*) AS "number_assets" FROM "tag" "t"
JOIN "tag_asset" "ta" ON "t"."id" = "ta"."tagsId" JOIN "asset" "a" ON "ta"."assetsId" = "a"."id"
WHERE "a"."visibility" != 'hidden'
GROUP BY "t"."value" ORDER BY "number_assets" DESC;
```
```sql title="Count by tag (per user)"
SELECT "t"."value" AS "tag_name", "u"."email" as "user_email", COUNT(*) AS "number_assets" FROM "tag" "t"
JOIN "tag_asset" "ta" ON "t"."id" = "ta"."tagsId" JOIN "asset" "a" ON "ta"."assetsId" = "a"."id" JOIN "user" "u" ON "a"."ownerId" = "u"."id"
WHERE "a"."visibility" != 'hidden'
GROUP BY "t"."value", "u"."email" ORDER BY "number_assets" DESC;
```sql title="Failed file movements"
SELECT * FROM "move_history";
```
## Users
```sql title="List all users"
SELECT * FROM "user";
SELECT * FROM "users";
```
```sql title="Get owner info from asset ID"
SELECT "user".* FROM "user" JOIN "asset" ON "user"."id" = "asset"."ownerId" WHERE "asset"."id" = 'fa310b01-2f26-4b7a-9042-d578226e021f';
SELECT "users".* FROM "users" JOIN "assets" ON "users"."id" = "assets"."ownerId" WHERE "assets"."id" = 'fa310b01-2f26-4b7a-9042-d578226e021f';
```
## Persons
```sql title="Delete person and unset it for the faces it was associated with"
DELETE FROM "person" WHERE "name" = 'PersonNameHere';
```
## System
### Config
## System Config
```sql title="Custom settings"
SELECT "key", "value" FROM "system_metadata" WHERE "key" = 'system-config';
@@ -144,17 +118,10 @@ SELECT "key", "value" FROM "system_metadata" WHERE "key" = 'system-config';
(Only used when not using the [config file](/docs/install/config-file))
### File properties
## Persons
```sql title="Without thumbnails"
SELECT * FROM "asset"
WHERE (NOT EXISTS (SELECT 1 FROM "asset_file" WHERE "asset"."id" = "asset_file"."assetId" AND "asset_file"."type" = 'thumbnail')
OR NOT EXISTS (SELECT 1 FROM "asset_file" WHERE "asset"."id" = "asset_file"."assetId" AND "asset_file"."type" = 'preview'))
AND "asset"."visibility" = 'timeline';
```
```sql title="Failed file movements"
SELECT * FROM "move_history";
```sql title="Delete person and unset it for the faces it was associated with"
DELETE FROM "person" WHERE "name" = 'PersonNameHere';
```
## Postgres internal


@@ -12,7 +12,7 @@ If you want Immich to be able to delete the images in the external library or ad
```diff
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/data
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /home/user/photos1:/home/user/photos1:ro
+ - /mnt/photos2:/mnt/photos2:ro # you can delete this line if you only have one mount point, or you can add more lines if you have more than two
```
@@ -41,7 +41,7 @@ In the Immich web UI:
- Click Add path
<img src={require('./img/add-path-button.webp').default} width="50%" title="Add Path button" />
- Enter **/home/user/photos1** as the path and click Add
- Enter **/usr/src/app/external** as the path and click Add
<img src={require('./img/add-path-field.webp').default} width="50%" title="Add Path field" />
- Save the new path

Some files were not shown because too many files have changed in this diff.