Compare commits


1 Commit

Author SHA1 Message Date
Jason Rasmussen 8af7a9c1b9 refactor: orientation 2024-09-19 12:06:20 -04:00
1491 changed files with 57111 additions and 77106 deletions


@@ -1,2 +0,0 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}


@@ -1,20 +0,0 @@
{
"name": "Immich devcontainers",
"build": {
"dockerfile": "Dockerfile",
"args": {
"BASEIMAGE": "mcr.microsoft.com/devcontainers/typescript-node:22"
}
},
"customizations": {
"vscode": {
"extensions": [
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"postCreateCommand": "make install-all",
"remoteUser": "node"
}

7 .github/dependabot.yml vendored Normal file

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

4 .github/release.yml vendored

@@ -4,10 +4,6 @@ changelog:
labels:
- changelog:breaking-change
- title: 🫥 Deprecated Changes
labels:
- changelog:deprecated
- title: 🔒 Security
labels:
- changelog:security


@@ -59,7 +59,7 @@ jobs:
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.10.0
uses: docker/build-push-action@v6.7.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64


@@ -22,7 +22,7 @@ concurrency:
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@@ -48,7 +48,7 @@ jobs:
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- cleanup-images
strategy:
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
uses: stumpylog/image-cleaner-action/untagged@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"


@@ -33,7 +33,6 @@ jobs:
- 'server/**'
- 'openapi/**'
- 'web/**'
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
@@ -125,7 +124,7 @@ jobs:
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -174,7 +173,7 @@ jobs:
fi
- name: Build and push image
uses: docker/build-push-action@v6.10.0
uses: docker/build-push-action@v6.7.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
@@ -216,7 +215,7 @@ jobs:
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -265,7 +264,7 @@ jobs:
fi
- name: Build and push image
uses: docker/build-push-action@v6.10.0
uses: docker/build-push-action@v6.7.0
with:
context: ${{ env.context }}
file: ${{ env.file }}


@@ -27,7 +27,7 @@ jobs:
- 'docs/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build


@@ -23,7 +23,7 @@ jobs:
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy -refresh=false"
tg_command: "destroy"
- name: Comment
uses: actions-cool/maintain-one-comment@v3


@@ -1,52 +0,0 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})


@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
pull-requests: read
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
@@ -19,4 +19,3 @@ jobs:
use_regex: true
labels: "changelog:.*"
add_comment: true
message: "Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label."


@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@1.3.0
uses: ytanikin/PRConventionalCommits@1.2.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'


@@ -30,7 +30,6 @@ jobs:
filters: |
web:
- 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**'
server:
- 'server/**'
@@ -81,7 +80,7 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
- name: Run small tests & coverage
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
@@ -244,26 +243,6 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
needs: pre-job

2 .gitignore vendored

@@ -21,5 +21,3 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*


@@ -41,4 +41,4 @@
"explorer.fileNesting.patterns": {
"*.ts": "${capture}.spec.ts,${capture}.mock.ts"
}
}
}


@@ -39,7 +39,7 @@ attach-server:
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk docs
MODULES = e2e server web cli sdk
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
@@ -48,9 +48,11 @@ install-%:
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
format-%:
npm --prefix $* run format:fix
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
lint-%:
npm --prefix $* run lint:fix
check-%:
@@ -64,27 +66,15 @@ test-e2e:
docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(filter-out e2e,$(MODULES)),build-$M) ;
build-all: $(foreach M,$(MODULES),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(MODULES),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),test-$M) ;
test-all: $(foreach M,$(MODULES),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +


@@ -17,24 +17,23 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
</p>
## Disclaimer
@@ -102,8 +101,6 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations


@@ -1 +1 @@
22.12.0
20.17.0


@@ -1,4 +1,4 @@
FROM node:22.12.0-alpine3.20@sha256:96cc8323e25c8cc6ddcb8b965e135cfd57846e8003ec0d7bcec16c5fd5f6d39f AS core
FROM node:20.17.0-alpine3.20@sha256:2d07db07a2df6830718ae2a47db6fedce6745f5bcd174c398f2acdda90a11c03 AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

850 cli/package-lock.json generated

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.37",
"version": "2.2.19",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -20,17 +20,17 @@
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.10.2",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@types/node": "^20.16.5",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.14.0",
"eslint": "^9.0.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1",
"eslint-plugin-unicorn": "^55.0.0",
"globals": "^15.9.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
@@ -39,7 +39,7 @@
"vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.4.0",
"vitest-fetch-mock": "^0.3.0",
"yaml": "^2.3.1"
},
"scripts": {
@@ -67,6 +67,6 @@
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.12.0"
"node": "20.17.0"
}
}


@@ -1,6 +1,5 @@
import {
Action,
AssetBulkUploadCheckItem,
AssetBulkUploadCheckResult,
AssetMediaResponseDto,
AssetMediaStatus,
@@ -12,7 +11,7 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
@@ -91,23 +90,23 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles: files, duplicates: [] };
}
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
const progressBar = new SingleBar(
{ format: 'Checking files | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
progressBar.start(files.length, 0);
const newFiles: string[] = [];
const duplicates: Asset[] = [];
const checkBulkUploadQueue = new Queue<AssetBulkUploadCheckItem[], void>(
async (assets: AssetBulkUploadCheckItem[]) => {
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets } });
const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (filepaths: string[]) => {
const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
if (action === Action.Accept) {
newFiles.push(filepath);
@@ -116,46 +115,19 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
duplicates.push({ id: assetId as string, filepath });
}
}
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
const results: { id: string; checksum: string }[] = [];
let checkBulkUploadRequests: AssetBulkUploadCheckItem[] = [];
const queue = new Queue<string, AssetBulkUploadCheckItem[]>(
async (filepath: string): Promise<AssetBulkUploadCheckItem[]> => {
const dto = { id: filepath, checksum: await sha1(filepath) };
results.push(dto);
checkBulkUploadRequests.push(dto);
if (checkBulkUploadRequests.length === 5000) {
const batch = checkBulkUploadRequests;
checkBulkUploadRequests = [];
void checkBulkUploadQueue.push(batch);
}
hashProgressBar.increment();
progressBar.increment(filepaths.length);
return results;
},
{ concurrency, retry: 3 },
);
for (const item of files) {
void queue.push(item);
for (const items of chunk(files, concurrency)) {
await queue.push(items);
}
await queue.drained();
if (checkBulkUploadRequests.length > 0) {
void checkBulkUploadQueue.push(checkBulkUploadRequests);
}
await checkBulkUploadQueue.drained();
multiBar.stop();
progressBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
@@ -229,8 +201,8 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
{ concurrency, retry: 3 },
);
for (const item of files) {
void queue.push(item);
for (const filepath of files) {
await queue.push(filepath);
}
await queue.drained();
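
Taken together, these hunks toggle between two queue-usage patterns: awaiting each push versus pushing fire-and-forget and waiting once on drained(). A minimal sketch of the fire-and-forget form, reusing the CLI's own Queue wrapper seen below; the hashFile helper is hypothetical:

const queue = new Queue<string, void>(
  async (filepath: string) => {
    await hashFile(filepath); // hypothetical per-item work
  },
  { concurrency: 5, retry: 3 },
);

for (const filepath of files) {
  void queue.push(filepath); // no await: workers process at their own pace
}

await queue.drained(); // resolves once every pushed task has finished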


@@ -72,8 +72,8 @@ export class Queue<T, R> {
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to a `unhandledRejection`.
*/
drained(): Promise<void> {
return this.queue.drained();
async drained(): Promise<void> {
await this.queue.drain();
}
/**
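
Both shapes of drained() expose the same Promise<void> to callers; the difference is whether the wrapper hands back the underlying promise or awaits it (and which underlying method it calls). A library-agnostic sketch of the two forms:

function drainedByReturn(q: { drained(): Promise<void> }): Promise<void> {
  return q.drained(); // pass the underlying promise straight through
}

async function drainedByAwait(q: { drain(): Promise<void> }): Promise<void> {
  await q.drain(); // awaiting keeps this wrapper frame in async stack traces
}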


@@ -115,7 +115,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {


@@ -141,21 +141,25 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
}
}
if (patterns.length === 0) {
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern;
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
if (recursive) {
searchPattern = searchPattern + '/**/';
}
const globbedFiles = await glob(searchPatterns, {
searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
absolute: true,
caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});
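
For reference, the per-pattern construction above expands as follows; a small sketch with illustrative inputs (fast-glob-style globstar and brace semantics assumed):

const patterns = ['/photos', '/albums'];
const extensions = ['jpg', 'png'];
const recursive = true;

const searchPatterns = patterns.map((pattern) => {
  let escapedPattern = pattern;
  if (recursive) {
    escapedPattern += '/**';
  }
  return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
// => ['/photos/**/*.{jpg,png}', '/albums/**/*.{jpg,png}']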


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.48.0"
constraints = "4.48.0"
version = "4.41.0"
constraints = "4.41.0"
hashes = [
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"h1:0mc+YrjQrcctGrGYDmzlcqcgSv9MYB74rvMaZylIKC8=",
"h1:0zUx4vk4jOORQqn6xHBF7dO6N6bielFHdJ0mgF4Obn8=",
"h1:AsIZW3uLFNOZO7kL/K7/Y/S0IYxUV9Hz85NNk/3TTsA=",
"h1:FSgYM4+LHMbX/a4Y1kx7FPPWmXqS3/MQYzvjMJHHHWM=",
"h1:Tx6Nh3BWP1x9L3KK/Eyi+ET0T26g3+jf1jyiuqpNIis=",
"h1:VRI9wu8P43xxfpeTndRwsisLnqncfnmEYMOEH5zH4pQ=",
"h1:YxQqmiES/Yanq/VfGqBEqg+VIO7FGhO88aKoWFHyGIg=",
"h1:ZWHiaesjgDLKWlfdNj0oKyj/DWdxcfsO6NINu39zfpY=",
"h1:a2aCgDDBz3ccrr8YstIMl7VFnKo1xZAp+rOv59PPJ7U=",
"h1:aRyv8tB6wBAF9lKsLEdiHyCqnK5LfZq0FqMXCcUB4UU=",
"h1:lXpuO7zv2uD2GzPE1ARxznreRAh+QHTc2lAJ7iOoFgY=",
"h1:sA1xq0QNQ4fH8SHXouYNq50xirVD18SamKQwPsBQrrY=",
"h1:v7sHvKq7oqMYPn47ULHFyIQsKD9o+6Xg/uHbxQUixEw=",
"h1:wo/x4atWyXuWGlfR6h5nH0YwBAmBwTRY27HtWP8ycLo=",
"zh:339d26e06dc6fb299ea8aad9476a60fd65bb1d40631ae8eeb81cddf2dd2bebc8",
"zh:3dec2ad96ac2c283fd34ce65781b55c4edbb4d5c5cb53da8e31537176c0ed562",
"zh:5f63a5f8080319a2fff09d4d49944829fa708723436520787cfb60725ced80cf",
"zh:67162c28ccea71cb8141ed15c0637e35621354ebe14878e0b75a8f160fc5505d",
"zh:6ac1e07f5347b6395aca690ed22101bb25e957d25f986f760ff673a7adfd5ef6",
"zh:70282a723c7b52fcabde2baad41c864ed3a8d69f0c4d27a6b6933cac434cffc6",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
"zh:924cd23abc326c6b3914e2cd9c94c7832c2552e1e9ae258fb9fd9aedaa5f7ce7",
"zh:a4b75e4c239879296259e7d54f1befbc7fdc16da2d62d1294e9f73add4cae61e",
"zh:a6ceb08feb63b00c7141783b31e45a154c76fd8cdebbdf371074805f0053572d",
"zh:afae1843f9ba85f2f6d94108c65cf43a457e83531a632d44d863e935160cb2ba",
"zh:bd6628ce60c778960a5755f7010b7e2cc5c6ff0341a21c175341b28058ec843d",
"zh:cd30866a1ff99d72b5fa1699db582fa4f25562e6ab21dcc6870324f3056108e0",
"zh:df5924cca691a8220aaaebb5cb55c3d6c32ff0a881f198695eff28155eb12b54",
"zh:e78d0696c941aba58df1cb36b8a0d25cd5f3963f01d9338fdbda74db58afdd49",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.48.0"
version = "4.41.0"
}
}
}


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.48.0"
constraints = "4.48.0"
version = "4.41.0"
constraints = "4.41.0"
hashes = [
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"h1:0mc+YrjQrcctGrGYDmzlcqcgSv9MYB74rvMaZylIKC8=",
"h1:0zUx4vk4jOORQqn6xHBF7dO6N6bielFHdJ0mgF4Obn8=",
"h1:AsIZW3uLFNOZO7kL/K7/Y/S0IYxUV9Hz85NNk/3TTsA=",
"h1:FSgYM4+LHMbX/a4Y1kx7FPPWmXqS3/MQYzvjMJHHHWM=",
"h1:Tx6Nh3BWP1x9L3KK/Eyi+ET0T26g3+jf1jyiuqpNIis=",
"h1:VRI9wu8P43xxfpeTndRwsisLnqncfnmEYMOEH5zH4pQ=",
"h1:YxQqmiES/Yanq/VfGqBEqg+VIO7FGhO88aKoWFHyGIg=",
"h1:ZWHiaesjgDLKWlfdNj0oKyj/DWdxcfsO6NINu39zfpY=",
"h1:a2aCgDDBz3ccrr8YstIMl7VFnKo1xZAp+rOv59PPJ7U=",
"h1:aRyv8tB6wBAF9lKsLEdiHyCqnK5LfZq0FqMXCcUB4UU=",
"h1:lXpuO7zv2uD2GzPE1ARxznreRAh+QHTc2lAJ7iOoFgY=",
"h1:sA1xq0QNQ4fH8SHXouYNq50xirVD18SamKQwPsBQrrY=",
"h1:v7sHvKq7oqMYPn47ULHFyIQsKD9o+6Xg/uHbxQUixEw=",
"h1:wo/x4atWyXuWGlfR6h5nH0YwBAmBwTRY27HtWP8ycLo=",
"zh:339d26e06dc6fb299ea8aad9476a60fd65bb1d40631ae8eeb81cddf2dd2bebc8",
"zh:3dec2ad96ac2c283fd34ce65781b55c4edbb4d5c5cb53da8e31537176c0ed562",
"zh:5f63a5f8080319a2fff09d4d49944829fa708723436520787cfb60725ced80cf",
"zh:67162c28ccea71cb8141ed15c0637e35621354ebe14878e0b75a8f160fc5505d",
"zh:6ac1e07f5347b6395aca690ed22101bb25e957d25f986f760ff673a7adfd5ef6",
"zh:70282a723c7b52fcabde2baad41c864ed3a8d69f0c4d27a6b6933cac434cffc6",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
"zh:924cd23abc326c6b3914e2cd9c94c7832c2552e1e9ae258fb9fd9aedaa5f7ce7",
"zh:a4b75e4c239879296259e7d54f1befbc7fdc16da2d62d1294e9f73add4cae61e",
"zh:a6ceb08feb63b00c7141783b31e45a154c76fd8cdebbdf371074805f0053572d",
"zh:afae1843f9ba85f2f6d94108c65cf43a457e83531a632d44d863e935160cb2ba",
"zh:bd6628ce60c778960a5755f7010b7e2cc5c6ff0341a21c175341b28058ec843d",
"zh:cd30866a1ff99d72b5fa1699db582fa4f25562e6ab21dcc6870324f3056108e0",
"zh:df5924cca691a8220aaaebb5cb55c3d6c32ff0a881f198695eff28155eb12b54",
"zh:e78d0696c941aba58df1cb36b8a0d25cd5f3963f01d9338fdbda74db58afdd49",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.48.0"
version = "4.41.0"
}
}
}

10 docker/.gitignore vendored

@@ -1,9 +1 @@
.DS_Store
node_modules
/build
/.svelte-kit
/dist
/package
.env
.env.*
!.env.example
.env


@@ -1 +0,0 @@
engine-strict=true


@@ -1 +0,0 @@
22.11.0


@@ -1,14 +0,0 @@
.DS_Store
node_modules
/build
/package
/coverage
.env
.env.*
!.env.example
*.md
# Ignore files for PNPM, NPM and YARN
pnpm-lock.yaml
package-lock.json
yarn.lock


@@ -1,9 +0,0 @@
{
"jsonRecursiveSort": true,
"organizeImportsSkipDestructiveCodeActions": true,
"plugins": ["prettier-plugin-organize-imports", "prettier-plugin-sort-json"],
"printWidth": 120,
"semi": true,
"singleQuote": true,
"trailingComma": "all"
}


@@ -36,18 +36,14 @@ services:
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
IMMICH_BUILD_IMAGE: development
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
hard: 1048576
ports:
- 3001:3001
- 9230:9230
- 9231:9231
- 2283:2283
depends_on:
- redis
- database
@@ -57,19 +53,16 @@ services:
immich-web:
container_name: immich_web
image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build:
context: ../web
command: ['/usr/src/app/bin/immich-web']
env_file:
- .env
ports:
- 3000:3000
- 2283:3000
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
- /usr/src/app/node_modules
ulimits:
@@ -106,7 +99,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
@@ -125,25 +118,28 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
# ports:
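
The command rewrites in this and the following compose diffs swap between a folded string (>-) and a list form; Compose accepts either shape for the same invocation. A small js-yaml sketch of how the two shapes serialize (argument values illustrative):

import { dump } from 'js-yaml';

// A string command serializes as a single scalar...
console.log(dump({ command: 'postgres -c shared_buffers=512MB -c wal_compression=on' }));
// ...while the list form serializes as a YAML sequence.
console.log(dump({ command: ['postgres', '-c', 'shared_buffers=512MB', '-c', 'wal_compression=on'] }));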


@@ -16,7 +16,7 @@ services:
env_file:
- .env
ports:
- 2283:2283
- 2283:3001
depends_on:
- redis
- database
@@ -47,7 +47,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -67,31 +67,19 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
immich-prometheus:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:565ee86501224ebbb98fc10b332fa54440b100469924003359edf49cbce374bd
image: prom/prometheus@sha256:f6639335d34a77d9d9db382b92eeb7fc00934be8eae81dbc03b31cfe90411a94
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -103,7 +91,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
image: grafana/grafana:11.2.0-ubuntu@sha256:8e2c13739563c3da9d45de96c6bcb63ba617cac8c571c060112c7fc8ad6914e9
volumes:
- grafana-data:/var/lib/grafana


@@ -22,7 +22,7 @@ services:
env_file:
- .env
ports:
- '2283:2283'
- 2283:3001
depends_on:
- redis
- database
@@ -48,7 +48,7 @@ services:
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
image: docker.io/redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -65,23 +65,11 @@ services:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
volumes:


@@ -1,86 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: [
'**/.DS_Store',
'**/node_modules',
'dist',
'lib/docker-compose/types.ts',
'build',
'package',
'**/.env',
'**/.env.*',
'!**/.env.example',
'**/pnpm-lock.yaml',
'**/package-lock.json',
'**/yarn.lock',
'eslint.config.mjs',
'vite.config.js',
'coverage',
],
},
...compat.extends('eslint:recommended', 'plugin:@typescript-eslint/recommended', 'plugin:unicorn/recommended'),
{
ignores: ['src/**'],
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.browser,
...globals.node,
NodeJS: true,
},
parser: tsParser,
ecmaVersion: 2022,
sourceType: 'module',
parserOptions: {
tsconfigRootDir: __dirname,
project: ['./tsconfig.json'],
},
},
rules: {
'@typescript-eslint/no-unused-vars': [
'warn',
{
argsIgnorePattern: '^_$',
varsIgnorePattern: '^_$',
},
],
curly: 2,
'unicorn/no-useless-undefined': 'off',
'unicorn/prefer-spread': 'off',
'unicorn/no-null': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-nested-ternary': 'off',
'unicorn/consistent-function-scoping': 'off',
'unicorn/prefer-top-level-await': 'off',
'unicorn/import-style': 'off',
'@typescript-eslint/await-thenable': 'error',
'@typescript-eslint/no-floating-promises': 'error',
'@typescript-eslint/no-misused-promises': 'error',
'@typescript-eslint/require-await': 'error',
'object-shorthand': ['error', 'always'],
},
},
];


@@ -51,4 +51,5 @@ services:
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
- LIBVA_DRIVER_NAME=d3d12


@@ -1,87 +0,0 @@
import { dump as dumpYaml } from 'js-yaml';
import { ComposeBuilder, ServiceBuilder } from 'lib/docker-compose/builder';
import { ContainerName, GeneratorOptions, ServiceName } from 'lib/types';
import { asQueryParams, getImmichEnvironment, getImmichVolumes, isExternalPostgres, isIoRedis } from 'lib/utils';
const RELEASE_VERSION = 'v1.122.0';
const postgresHealthCheck = [
'pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;',
`Chksum="$$(psql --dbname="$\${POSTGRES_DB}" --username="$\${POSTGRES_USER}" --tuples-only --no-align`,
`--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";`,
'echo "checksum failure count is $$Chksum";',
`[ "$$Chksum" = '0' ] || exit 1\n`,
].join(' ');
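// (Explanatory comment, not in the original file: "$$" is Compose's escape
// for a literal "$", so ${POSTGRES_DB} and ${POSTGRES_USER} survive
// compose-file interpolation and are expanded inside the container instead;
// the "$\${...}" form additionally escapes TypeScript template interpolation.)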
const postgresCommand = [
`postgres`,
`-c shared_preload_libraries=vectors.so`,
`-c 'search_path="$$user", public, vectors'`,
`-c logging_collector=on`,
`-c max_wal_size=2GB`,
`-c shared_buffers=512MB`,
`-c wal_compression=on`,
].join(' ');
const build = (options: GeneratorOptions) => {
const healthchecksEnabled = options.healthchecks ?? true;
const containerNames = options.containerNames ?? true;
const immichService = ServiceBuilder.create(ServiceName.ImmichServer)
.setImage(`ghcr.io/immich-app/immich-server:${RELEASE_VERSION}`)
.setContainerName(containerNames && ContainerName.ImmichServer)
.setRestartPolicy('always')
.setHealthcheck(healthchecksEnabled)
.setEnvironment(getImmichEnvironment(options))
.addExposedPort(2283)
.addVolumes(getImmichVolumes(options));
const machineLearningEnabled = options.machineLearning;
const modelCacheVolume = 'model-cache';
const machineLearningService =
machineLearningEnabled &&
ServiceBuilder.create(ServiceName.ImmichMachineLearning)
.setImage(`ghcr.io/immich-app/immich-machine-learning:${RELEASE_VERSION}-cuda`)
.setContainerName(containerNames && ContainerName.ImmichMachineLearning)
.setRestartPolicy('always')
.setHealthcheck(healthchecksEnabled)
.addVolume(`${modelCacheVolume}:/cache`);
const redisService = isIoRedis(options)
? false
: ServiceBuilder.create(ServiceName.Redis)
.setImage('docker.io/redis:6.2-alpine')
.setContainerName(containerNames && ContainerName.Redis)
.setRestartPolicy('always')
.setHealthcheck(healthchecksEnabled && 'redis-cli ping || exit 1');
const postgresService = isExternalPostgres(options)
? false
: ServiceBuilder.create(ServiceName.Postgres)
.setImage('docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0')
.setContainerName(containerNames && ContainerName.Postgres)
.setRestartPolicy('always')
.setEnvironment({
POSTGRES_PASSWORD: options.postgresPassword,
POSTGRES_USER: options.postgresUser,
POSTGRES_DB: options.postgresDatabase,
POSTGRES_INITDB_ARGS: '--data-checksums',
})
.setHealthcheck(healthchecksEnabled && postgresHealthCheck)
.setCommand(postgresCommand)
.addVolume(`${options.postgresDataLocation}:/var/lib/postgresql/data`);
const domain = 'https://get.immich.app';
const url = `${domain}/compose?${asQueryParams(options)}`;
return ComposeBuilder.create('immich')
.addComment(`This docker compose file was originally generated at https://get.immich.app/compose`)
.addComment(url)
.addComment(`${dumpYaml({ options }, { indent: 2 })}`)
.addService(immichService.addDependsOn(redisService).addDependsOn(postgresService))
.addService(machineLearningService)
.addService(redisService)
.addService(postgresService)
.addVolume(modelCacheVolume, machineLearningEnabled && {});
};
export const buildSpec = (options: GeneratorOptions) => build(options).asSpec();
export const buildYaml = (options: GeneratorOptions) => build(options).asYaml();
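
A hypothetical invocation of the generator above; the option values are illustrative, and only fields actually referenced in build() are set:

const yaml = buildYaml({
  machineLearning: true,
  healthchecks: true,
  containerNames: true,
  postgresUser: 'postgres',
  postgresPassword: 'postgres', // placeholder secret
  postgresDatabase: 'immich',
  postgresDataLocation: './postgres',
} as GeneratorOptions);

console.log(yaml);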


@@ -1,208 +0,0 @@
import { dump as dumpYaml } from 'js-yaml';
import {
Command,
ComposeSpecification,
DefinitionsService,
DefinitionsVolume,
ListOfStrings,
} from 'lib/docker-compose/types';
type ServiceNameAccessor = { getName: () => string };
type ServiceBuildAccessor = { build: () => DefinitionsService };
const withNewLines = (yaml: string) =>
yaml.replaceAll(/(?<leading>[^:]\n)(?<key>[ ]{0,2}\S+:)$/gm, '$<leading>\n$<key>');
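// (Explanatory comment, not in the original file: the regex matches any key
// indented at most two spaces, e.g. "services:" or "  immich-server:", whose
// preceding line does not end in ":", and re-emits it with a blank line in
// front, visually separating top-level sections and services in the output.)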
export class ComposeBuilder {
private spec: ComposeSpecification = {};
private comments: string[] = [];
private constructor(projectName?: string) {
if (projectName) {
this.setProjectName(projectName);
}
}
static create(projectName?: string) {
return new ComposeBuilder(projectName);
}
setProjectName(name: string) {
this.spec.name = name;
return this;
}
addComment(comment: string) {
this.comments.push(comment + '\n');
return this;
}
addService(spec: false | (ServiceNameAccessor & ServiceBuildAccessor)) {
if (!spec) {
return this;
}
if (!this.spec.services) {
this.spec.services = {};
}
this.spec.services[spec.getName()] = spec.build();
return this;
}
addVolume(name: string, volume: false | DefinitionsVolume) {
if (volume === false) {
return this;
}
if (!this.spec.volumes) {
this.spec.volumes = {};
}
this.spec.volumes[name] = volume;
return this;
}
asSpec() {
return this.spec;
}
asYaml() {
let prefix = '';
if (this.comments.length > 0) {
const comments =
this.comments
.flatMap((comment) => comment.split('\n'))
.join('\n')
.trim()
.split('\n')
.map((comment) => `# ${comment}`)
.join('\n') + '\n\n';
prefix += comments;
}
const spec = withNewLines(dumpYaml(this.spec, { indent: 2, lineWidth: 140 })).trim();
return prefix + spec;
}
}
export class ServiceBuilder {
private spec: DefinitionsService = {};
private constructor(private name: string) {}
static create(name: string) {
return new ServiceBuilder(name);
}
getName() {
return this.name;
}
setImage(image: string) {
this.spec.image = image;
return this;
}
setContainerName(name: false | string) {
if (name === false) {
return this;
}
this.spec.container_name = name;
return this;
}
addExposedPort(port: number | { internal: number; external: number }) {
if (typeof port === 'number') {
port = { internal: port, external: port };
}
const { internal, external } = port;
if (!this.spec.ports) {
this.spec.ports = [];
}
this.spec.ports.push(`${external}:${internal}`);
return this;
}
addDependsOn(service: false | string | ServiceNameAccessor) {
if (service === false) {
return this;
}
let serviceName = service as string;
if ('getName' in (service as ServiceNameAccessor)) {
serviceName = (service as ServiceNameAccessor).getName();
}
if (!this.spec.depends_on) {
this.spec.depends_on = [];
}
(this.spec.depends_on as ListOfStrings).push(serviceName);
return this;
}
setRestartPolicy(restart: string) {
this.spec.restart = restart;
return this;
}
setEnvironment(env: Record<string, string | number | undefined>) {
this.spec.environment = env;
return this;
}
setHealthcheck(test: boolean | string) {
if (test === true) {
return this;
}
if (test === false) {
this.spec.healthcheck = { disable: true };
return this;
}
this.spec.healthcheck = { test };
return this;
}
setCommand(command: Command) {
this.spec.command = command;
return this;
}
addVolume(volume: string) {
if (!this.spec.volumes) {
this.spec.volumes = [];
}
this.spec.volumes.push(volume);
return this;
}
addVolumes(volumes: string[]) {
for (const volume of volumes) {
this.addVolume(volume);
}
return this;
}
build() {
return this.spec;
}
}
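
A minimal sketch of the builders in use, calling only methods defined above (the image tag and port values are illustrative):

const server = ServiceBuilder.create('immich-server')
  .setImage('ghcr.io/immich-app/immich-server:v1.122.0') // illustrative tag
  .setRestartPolicy('always')
  .addExposedPort(2283);

const yaml = ComposeBuilder.create('immich')
  .addComment('example output')
  .addService(server)
  .asYaml();

console.log(yaml);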


@@ -1,937 +0,0 @@
export type DefinitionsInclude =
| string
| {
path?: StringOrList;
env_file?: StringOrList;
project_directory?: string;
};
export type StringOrList = string | ListOfStrings;
export type ListOfStrings = string[];
export type DefinitionsDevelopment = {
watch?: {
ignore?: string[];
path: string;
action: 'rebuild' | 'sync' | 'sync+restart';
target?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} & Development;
export type Development = {
watch?: {
ignore?: string[];
path: string;
action: 'rebuild' | 'sync' | 'sync+restart';
target?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} | null;
export type DefinitionsDeployment = {
mode?: string;
endpoint_mode?: string;
replicas?: number | string;
labels?: ListOrDict;
rollback_config?: {
parallelism?: number | string;
delay?: string;
failure_action?: string;
monitor?: string;
max_failure_ratio?: number | string;
order?: 'start-first' | 'stop-first';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
update_config?: {
parallelism?: number | string;
delay?: string;
failure_action?: string;
monitor?: string;
max_failure_ratio?: number | string;
order?: 'start-first' | 'stop-first';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
resources?: {
limits?: {
cpus?: number | string;
memory?: string;
pids?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
reservations?: {
cpus?: number | string;
memory?: string;
generic_resources?: DefinitionsGenericResources;
devices?: DefinitionsDevices;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
restart_policy?: {
condition?: string;
delay?: string;
max_attempts?: number | string;
window?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
placement?: {
constraints?: string[];
preferences?: {
spread?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
max_replicas_per_node?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} & Deployment;
export type ListOrDict =
| {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` ".+".
*/
[k: string]: undefined | string | number | boolean | null;
}
| string[];
export type DefinitionsGenericResources = {
discrete_resource_spec?: {
kind?: string;
value?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
export type DefinitionsDevices = {
capabilities: ListOfStrings;
count?: string | number;
device_ids?: ListOfStrings;
driver?: string;
options?: ListOrDict;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
export type Deployment = {
mode?: string;
endpoint_mode?: string;
replicas?: number | string;
labels?: ListOrDict;
rollback_config?: {
parallelism?: number | string;
delay?: string;
failure_action?: string;
monitor?: string;
max_failure_ratio?: number | string;
order?: 'start-first' | 'stop-first';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
update_config?: {
parallelism?: number | string;
delay?: string;
failure_action?: string;
monitor?: string;
max_failure_ratio?: number | string;
order?: 'start-first' | 'stop-first';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
resources?: {
limits?: {
cpus?: number | string;
memory?: string;
pids?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
reservations?: {
cpus?: number | string;
memory?: string;
generic_resources?: DefinitionsGenericResources;
devices?: DefinitionsDevices;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
restart_policy?: {
condition?: string;
delay?: string;
max_attempts?: number | string;
window?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
placement?: {
constraints?: string[];
preferences?: {
spread?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
max_replicas_per_node?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} | null;
export type ExtraHosts = {} | string[];
export type ServiceConfigOrSecret = (
| string
| {
source?: string;
target?: string;
uid?: string;
gid?: string;
mode?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
)[];
export type Command = null | string | string[];
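// (Illustrative comment, not in the original file: all three Command shapes
// type-check, matching the string/list command forms in the compose diffs above.)
//   const shellForm: Command = 'postgres -c wal_compression=on';
//   const execForm: Command = ['postgres', '-c', 'wal_compression=on'];
//   const unset: Command = null;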
export type EnvFile =
| string
| (
| string
| {
path: string;
format?: string;
required?: boolean | string;
}
)[];
/**
* This interface was referenced by `PropertiesNetworks`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
export type DefinitionsNetwork = {
name?: string;
driver?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
ipam?: {
driver?: string;
config?: {
subnet?: string;
ip_range?: string;
gateway?: string;
aux_addresses?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
options?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
external?:
| boolean
| string
| {
name?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
internal?: boolean | string;
enable_ipv6?: boolean | string;
attachable?: boolean | string;
labels?: ListOrDict;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} & Network;
export type Network = {
name?: string;
driver?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
ipam?: {
driver?: string;
config?: {
subnet?: string;
ip_range?: string;
gateway?: string;
aux_addresses?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}[];
options?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
external?:
| boolean
| string
| {
name?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
internal?: boolean | string;
enable_ipv6?: boolean | string;
attachable?: boolean | string;
labels?: ListOrDict;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} | null;
/**
* This interface was referenced by `PropertiesVolumes`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
export type DefinitionsVolume = {
name?: string;
driver?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
external?:
| boolean
| string
| {
name?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
labels?: ListOrDict;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} & Volume;
export type Volume = {
name?: string;
driver?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
external?:
| boolean
| string
| {
name?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
labels?: ListOrDict;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} | null;
/**
* The Compose file is a YAML file defining a multi-containers based application.
*/
export interface ComposeSpecification {
/**
* declared for backward compatibility, ignored.
*/
version?: string;
/**
* define the Compose project name, until user defines one explicitly.
*/
name?: string;
/**
* compose sub-projects to be included.
*/
include?: DefinitionsInclude[];
services?: PropertiesServices;
networks?: PropertiesNetworks;
volumes?: PropertiesVolumes;
secrets?: PropertiesSecrets;
configs?: PropertiesConfigs;
/**
* This interface was referenced by `ComposeSpecification`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
export interface PropertiesServices {
[k: string]: DefinitionsService;
}
/**
* This interface was referenced by `PropertiesServices`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
export interface DefinitionsService {
develop?: DefinitionsDevelopment;
deploy?: DefinitionsDeployment;
annotations?: ListOrDict;
attach?: boolean | string;
build?:
| string
| {
context?: string;
dockerfile?: string;
dockerfile_inline?: string;
entitlements?: string[];
args?: ListOrDict;
ssh?: ListOrDict;
labels?: ListOrDict;
cache_from?: string[];
cache_to?: string[];
no_cache?: boolean | string;
additional_contexts?: ListOrDict;
network?: string;
pull?: boolean | string;
target?: string;
shm_size?: number | string;
extra_hosts?: ExtraHosts;
isolation?: string;
privileged?: boolean | string;
secrets?: ServiceConfigOrSecret;
tags?: string[];
ulimits?: Ulimits;
platforms?: string[];
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
blkio_config?: {
device_read_bps?: BlkioLimit[];
device_read_iops?: BlkioLimit[];
device_write_bps?: BlkioLimit[];
device_write_iops?: BlkioLimit[];
weight?: number | string;
weight_device?: BlkioWeight[];
};
cap_add?: string[];
cap_drop?: string[];
cgroup?: 'host' | 'private';
cgroup_parent?: string;
command?: Command;
configs?: ServiceConfigOrSecret;
container_name?: string;
cpu_count?: string | number;
cpu_percent?: string | number;
cpu_shares?: number | string;
cpu_quota?: number | string;
cpu_period?: number | string;
cpu_rt_period?: number | string;
cpu_rt_runtime?: number | string;
cpus?: number | string;
cpuset?: string;
credential_spec?: {
config?: string;
file?: string;
registry?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
depends_on?:
| ListOfStrings
| {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
[k: string]: {
restart?: boolean | string;
required?: boolean;
condition: 'service_started' | 'service_healthy' | 'service_completed_successfully';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
};
device_cgroup_rules?: ListOfStrings;
devices?: (
| string
| {
source: string;
target?: string;
permissions?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
)[];
dns?: StringOrList;
dns_opt?: string[];
dns_search?: StringOrList;
domainname?: string;
entrypoint?: Command;
env_file?: EnvFile;
environment?: ListOrDict;
expose?: (string | number)[];
extends?:
| string
| {
service: string;
file?: string;
};
external_links?: string[];
extra_hosts?: ExtraHosts;
group_add?: (string | number)[];
healthcheck?: DefinitionsHealthcheck;
hostname?: string;
image?: string;
init?: boolean | string;
ipc?: string;
isolation?: string;
labels?: ListOrDict;
links?: string[];
logging?: {
driver?: string;
options?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number | null;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
mac_address?: string;
mem_limit?: number | string;
mem_reservation?: string | number;
mem_swappiness?: number | string;
memswap_limit?: number | string;
network_mode?: string;
networks?:
| ListOfStrings
| {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
[k: string]: {
aliases?: ListOfStrings;
ipv4_address?: string;
ipv6_address?: string;
link_local_ips?: ListOfStrings;
mac_address?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
priority?: number;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
} | null;
};
oom_kill_disable?: boolean | string;
oom_score_adj?: string | number;
pid?: string | null;
pids_limit?: number | string;
platform?: string;
ports?: (
| number
| string
| {
name?: string;
mode?: string;
host_ip?: string;
target?: number | string;
published?: string | number;
protocol?: string;
app_protocol?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
)[];
post_start?: DefinitionsServiceHook[];
pre_stop?: DefinitionsServiceHook[];
privileged?: boolean | string;
profiles?: ListOfStrings;
pull_policy?: 'always' | 'never' | 'if_not_present' | 'build' | 'missing';
read_only?: boolean | string;
restart?: string;
runtime?: string;
scale?: number | string;
security_opt?: string[];
shm_size?: number | string;
secrets?: ServiceConfigOrSecret;
sysctls?: ListOrDict;
stdin_open?: boolean | string;
stop_grace_period?: string;
stop_signal?: string;
storage_opt?: {
[k: string]: unknown;
};
tmpfs?: StringOrList;
tty?: boolean | string;
ulimits?: Ulimits;
user?: string;
uts?: string;
userns_mode?: string;
volumes?: (
| string
| {
type: string;
source?: string;
target?: string;
read_only?: boolean | string;
consistency?: string;
bind?: {
propagation?: string;
create_host_path?: boolean | string;
selinux?: 'z' | 'Z';
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
volume?: {
nocopy?: boolean | string;
subpath?: string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
tmpfs?: {
size?: number | string;
mode?: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
)[];
volumes_from?: string[];
working_dir?: string;
/**
* This interface was referenced by `DefinitionsService`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
export interface Ulimits {
/**
* This interface was referenced by `Ulimits`'s JSON-Schema definition
* via the `patternProperty` "^[a-z]+$".
*/
[k: string]:
| (number | string)
| {
hard: number | string;
soft: number | string;
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
};
}
export interface BlkioLimit {
path?: string;
rate?: number | string;
}
export interface BlkioWeight {
path?: string;
weight?: number | string;
}
export interface DefinitionsHealthcheck {
disable?: boolean | string;
interval?: string;
retries?: number | string;
test?: string | string[];
timeout?: string;
start_period?: string;
start_interval?: string;
/**
* This interface was referenced by `DefinitionsHealthcheck`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
export interface DefinitionsServiceHook {
command?: Command;
user?: string;
privileged?: boolean | string;
working_dir?: string;
environment?: ListOrDict;
/**
* This interface was referenced by `DefinitionsServiceHook`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
export interface PropertiesNetworks {
[k: string]: DefinitionsNetwork;
}
export interface PropertiesVolumes {
[k: string]: DefinitionsVolume;
}
export interface PropertiesSecrets {
[k: string]: DefinitionsSecret;
}
/**
* This interface was referenced by `PropertiesSecrets`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
export interface DefinitionsSecret {
name?: string;
environment?: string;
file?: string;
external?:
| boolean
| string
| {
name?: string;
[k: string]: unknown;
};
labels?: ListOrDict;
driver?: string;
driver_opts?: {
/**
* This interface was referenced by `undefined`'s JSON-Schema definition
* via the `patternProperty` "^.+$".
*/
[k: string]: string | number;
};
template_driver?: string;
/**
* This interface was referenced by `DefinitionsSecret`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
export interface PropertiesConfigs {
[k: string]: DefinitionsConfig;
}
/**
* This interface was referenced by `PropertiesConfigs`'s JSON-Schema definition
* via the `patternProperty` "^[a-zA-Z0-9._-]+$".
*/
export interface DefinitionsConfig {
name?: string;
content?: string;
environment?: string;
file?: string;
external?:
| boolean
| string
| {
name?: string;
[k: string]: unknown;
};
labels?: ListOrDict;
template_driver?: string;
/**
* This interface was referenced by `DefinitionsConfig`'s JSON-Schema definition
* via the `patternProperty` "^x-".
*/
[k: string]: unknown;
}
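For orientation, a minimal compose file expressed against these generated types might look like the sketch below. The import path, image tags, and service names are illustrative assumptions, not taken from the repository.

```typescript
import type { ComposeSpecification } from 'lib/types'; // assumption: where the generated types are exported

// A hypothetical minimal spec; the field shapes follow the interfaces above.
const spec: ComposeSpecification = {
  name: 'immich',
  services: {
    'immich-redis': { image: 'redis:6.2-alpine' },
    'immich-server': {
      image: 'ghcr.io/immich-app/immich-server:release',
      ports: ['2283:2283'],
      depends_on: { 'immich-redis': { condition: 'service_started' } },
    },
  },
  volumes: { 'model-cache': {} },
};
```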

View File

@@ -1,2 +0,0 @@
export * from 'lib/build';
export * from 'lib/types';

View File

@@ -1,57 +0,0 @@
export enum ServiceName {
ImmichServer = 'immich-server',
ImmichMachineLearning = 'immich-machine-learning',
Postgres = 'immich-postgres',
Redis = 'immich-redis',
}
export enum ContainerName {
ImmichServer = 'immich-server',
ImmichMachineLearning = 'immich-machine-learning',
Postgres = 'immich-postgres',
Redis = 'immich-redis',
}
export type BaseOptions = {
releaseVersion: string;
healthchecks?: boolean;
machineLearning: boolean;
containerNames?: boolean;
serverTimeZone?: string;
};
export type GeneratorOptions = (BaseOptions & FolderOptions & PostgresOptions) & RedisOptions;
export type FolderOptions = {
baseLocation: string;
encodedVideoLocation?: string;
libraryLocation?: string;
uploadLocation?: string;
profileLocation?: string;
thumbnailsLocation?: string;
backupsLocation?: string;
};
export type PostgresOptions = InternalPostgresOptions | ExternalPostgresOptions;
export type InternalPostgresOptions = {
postgresUser: string;
postgresPassword: string;
postgresDatabase: string;
postgresDataLocation: string;
};
export type ExternalPostgresOptions = { postgresUrl: string; postgresVectorExtension?: VectorExtension };
export type RedisOptions = ExternalRedisOptions | IoRedisOptions | { redis: true };
export type ExternalRedisOptions = {
redisHost: string;
redisPort: number;
redisDbIndex?: number;
redisUsername?: string;
redisPassword?: string;
redisSocket?: string;
};
export type IoRedisOptions = { redisUrl: string };
export type VectorExtension = 'pgvector' | 'pgvecto.rs';
export type HardwareAccelerationPlatform = 'nvenc' | 'quicksync' | 'rkmpp' | 'vaapi' | 'vaapi-wsl';
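Because `GeneratorOptions` intersects several union types, a valid value must pick exactly one Postgres shape and one Redis shape. A sketch using the external variants (all connection strings are placeholders):

```typescript
import { GeneratorOptions } from 'lib/types';

// External Postgres plus an ioredis-style URL; these satisfy
// ExternalPostgresOptions and IoRedisOptions respectively.
const options: GeneratorOptions = {
  releaseVersion: 'v1.122.0',
  machineLearning: false,
  baseLocation: '/mnt/photos',
  postgresUrl: 'postgres://immich:immich@db.internal:5432/immich',
  postgresVectorExtension: 'pgvector',
  redisUrl: 'ioredis://<base64>',
};
```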

View File

@@ -1,84 +0,0 @@
import {
ExternalPostgresOptions,
ExternalRedisOptions,
GeneratorOptions,
IoRedisOptions,
PostgresOptions,
RedisOptions,
ServiceName,
} from 'lib/types';
export const isExternalPostgres = (options: PostgresOptions): options is ExternalPostgresOptions =>
'postgresUrl' in options;
export const isIoRedis = (options: RedisOptions): options is IoRedisOptions => 'redisUrl' in options;
export const isExternalRedis = (options: RedisOptions): options is ExternalRedisOptions => 'redisHost' in options;
export const asQueryParams = (values: Record<string, string | number | boolean | undefined>) => {
return new URLSearchParams(
Object.entries(values)
.filter(([, value]) => value !== undefined) // note: .filter(Boolean) would keep every entry, including undefined values
.map(([key, value]) => [key, String(value)]),
).toString();
};
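A few quick usage examples for these helpers (a sketch; the import path is an assumption, and the results follow from the definitions above):

```typescript
import { asQueryParams, isExternalPostgres, isIoRedis } from 'lib/index'; // assumed export path

isExternalPostgres({ postgresUrl: 'postgres://immich:immich@localhost:5432/immich' }); // true
isIoRedis({ redis: true }); // false

// Undefined values are dropped; everything else is stringified.
asQueryParams({ ssl: true, timeout: 30, label: undefined }); // 'ssl=true&timeout=30'
```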
export const getImmichVolumes = (options: GeneratorOptions) => {
const {
baseLocation,
encodedVideoLocation,
uploadLocation,
backupsLocation,
profileLocation,
libraryLocation,
thumbnailsLocation,
} = options;
const internalBaseLocation = '/usr/src/app/upload';
const volumes = [`${baseLocation}:${internalBaseLocation}`];
for (const { override, folder } of [
{ override: encodedVideoLocation, folder: 'encoded-video' },
{ override: libraryLocation, folder: 'library' },
{ override: uploadLocation, folder: 'upload' },
{ override: profileLocation, folder: 'profile' },
{ override: thumbnailsLocation, folder: 'thumbs' },
{ override: backupsLocation, folder: 'backups' },
]) {
if (override) {
volumes.push(`${override}:${internalBaseLocation}/${folder}`);
}
}
volumes.push(`/etc/localtime:/etc/localtime:ro`);
return volumes;
};
export const getImmichEnvironment = (options: GeneratorOptions) => {
const env: Record<string, string | number | undefined> = {};
if (isExternalPostgres(options)) {
env.DB_URL = options.postgresUrl;
env.DB_VECTOR_EXTENSION = options.postgresVectorExtension;
} else {
const { postgresUser, postgresPassword, postgresDatabase } = options;
env.DB_URL = `postgres://${postgresUser}:${postgresPassword}@${ServiceName.Postgres}:5432/${postgresDatabase}`;
}
if (isIoRedis(options)) {
env.REDIS_URL = options.redisUrl;
} else if (isExternalRedis(options)) {
env.REDIS_HOSTNAME = options.redisHost;
env.REDIS_PORT = options.redisPort;
env.REDIS_DBINDEX = options.redisDbIndex;
env.REDIS_USERNAME = options.redisUsername;
env.REDIS_PASSWORD = options.redisPassword;
env.REDIS_SOCKET = options.redisSocket;
} else {
env.REDIS_HOSTNAME = ServiceName.Redis;
}
env.TZ = options.serverTimeZone;
return env;
};
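For reference, here is roughly what the two helpers above produce for a split-storage setup with an external Redis host. The expected outputs in the comments follow directly from the code; the paths are example values.

```typescript
import { getImmichEnvironment, getImmichVolumes } from 'lib/index'; // assumed export path
import { GeneratorOptions } from 'lib/types';

const options: GeneratorOptions = {
  releaseVersion: 'v1.122.0',
  machineLearning: true,
  baseLocation: '/home/immich/library',
  thumbnailsLocation: '/home/fast/thumbs',
  postgresUser: 'postgres',
  postgresPassword: 'postgres',
  postgresDatabase: 'immich',
  postgresDataLocation: '/home/immich/database',
  redisHost: '192.168.0.5',
  redisPort: 1234,
};

getImmichVolumes(options);
// [ '/home/immich/library:/usr/src/app/upload',
//   '/home/fast/thumbs:/usr/src/app/upload/thumbs',
//   '/etc/localtime:/etc/localtime:ro' ]

getImmichEnvironment(options);
// { DB_URL: 'postgres://postgres:postgres@immich-postgres:5432/immich',
//   REDIS_HOSTNAME: '192.168.0.5', REDIS_PORT: 1234, ... }
```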

docker/package-lock.json (generated, 4555 changes)

File diff suppressed because it is too large.

View File

@@ -1,38 +0,0 @@
{
"name": "immich-docker",
"version": "0.0.0",
"description": "A docker-compose generator for Immich",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"build": "vite build",
"generate": "npx tsx src/index.ts",
"lint": "eslint . --max-warnings 0",
"lint:fix": "npm run lint -- --fix",
"format": "prettier --check .",
"format:fix": "prettier --write ."
},
"type": "module",
"exports": "./dist/immich-docker.js",
"author": "team@immich.app",
"private": true,
"license": "GNU Affero General Public License version 3",
"dependencies": {
"js-yaml": "^4.1.0"
},
"devDependencies": {
"@eslint/eslintrc": "^3.2.0",
"@eslint/js": "^9.16.0",
"@types/js-yaml": "^4.0.9",
"@types/node": "^22.10.2",
"@typescript-eslint/eslint-plugin": "^8.18.0",
"@typescript-eslint/parser": "^8.18.0",
"eslint-plugin-unicorn": "^56.0.1",
"globals": "^15.13.0",
"json-schema-to-ts": "^3.1.1",
"prettier-plugin-organize-imports": "^4.1.0",
"prettier-plugin-sort-json": "^4.0.0",
"vite": "^6.0.3",
"vitest": "^2.1.8"
}
}

View File

@@ -1,5 +1,5 @@
global:
scrape_interval: 15s
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:

View File

@@ -1,60 +0,0 @@
import { mkdirSync, writeFileSync } from 'node:fs';
import { buildYaml } from '../lib/index';
import { GeneratorOptions } from '../lib/types';
const main = () => {
const commonOptions = {
releaseVersion: 'v1.122.0',
baseLocation: '/home/immich/library',
serverTimeZone: 'America/New_York',
healthchecks: true,
machineLearning: true,
containerNames: true,
// hardwareAcceleration: 'nvenc',
};
const postgresOptions = {
postgresUser: 'postgres',
postgresPassword: 'postgres',
postgresDatabase: 'immich',
postgresDataLocation: '/home/immich/database',
};
const defaultOptions: GeneratorOptions = { ...commonOptions, ...postgresOptions, redis: true };
const samples: Array<{ name: string; options: GeneratorOptions }> = [
{ name: 'defaults', options: defaultOptions },
{ name: 'no-names', options: { ...defaultOptions, containerNames: false } },
{ name: 'no-healthchecks', options: { ...defaultOptions, healthchecks: false } },
{ name: 'external-ioredis', options: { ...defaultOptions, redisUrl: 'ioredis://<base64>' } },
{ name: 'external-redis', options: { ...defaultOptions, redisHost: '192.168.0.5', redisPort: 1234 } },
{
name: 'external-postgres',
options: {
...defaultOptions,
postgresUrl: 'postgres://immich:immich@localhost:5432/immich',
postgresVectorExtension: 'pgvector',
},
},
{
name: 'split-storage',
options: {
...defaultOptions,
thumbnailsLocation: '/home/fast/thumbs',
encodedVideoLocation: '/home/fast/encoded-videos',
},
},
];
// TODO replace with vitest test files/scripts
mkdirSync('./examples', { recursive: true });
for (const { name, options } of samples) {
const spec = buildYaml(options);
const filename = `./examples/docker-compose.${name}.yaml`;
writeFileSync(filename, spec);
console.log(`Wrote ${filename}`);
}
};
main();
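As the TODO above suggests, these samples could instead live in vitest files. A minimal sketch of what such a test might look like (the file name and the exact assertion are illustrative assumptions):

```typescript
// test/build.spec.ts (hypothetical; 'lib' resolves via the vite alias below)
import { describe, expect, it } from 'vitest';
import { buildYaml } from 'lib/index';
import { GeneratorOptions } from 'lib/types';

const options: GeneratorOptions = {
  releaseVersion: 'v1.122.0',
  baseLocation: '/home/immich/library',
  machineLearning: true,
  postgresUser: 'postgres',
  postgresPassword: 'postgres',
  postgresDatabase: 'immich',
  postgresDataLocation: '/home/immich/database',
  redis: true,
};

describe('buildYaml', () => {
  it('pins services to the requested release version', () => {
    // Assumes the release version appears in the rendered image tags.
    expect(buildYaml(options)).toContain('v1.122.0');
  });
});
```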

View File

@@ -1,19 +0,0 @@
{
"compilerOptions": {
"allowJs": true,
"baseUrl": "./",
"checkJs": true,
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"module": "es2020",
"moduleResolution": "bundler",
"outDir": "./dist",
"resolveJsonModule": true,
"skipLibCheck": true,
"sourceMap": true,
"strict": true,
"target": "es2022",
"types": []
},
"include": ["lib"]
}

View File

@@ -1,25 +0,0 @@
{
"compilerOptions": {
"allowSyntheticDefaultImports": true,
"baseUrl": "./",
"declaration": true,
"emitDecoratorMetadata": true,
"esModuleInterop": true,
"experimentalDecorators": true,
"forceConsistentCasingInFileNames": true,
"incremental": true,
"jsx": "react",
"lib": ["dom", "es2023"],
"module": "node16",
"moduleResolution": "node16",
"outDir": "./dist",
"preserveWatchOutput": true,
"removeComments": true,
"resolveJsonModule": true,
"skipLibCheck": true,
"sourceMap": true,
"strict": true,
"target": "es2022"
},
"include": ["src", "lib"]
}

View File

@@ -1,20 +0,0 @@
import { resolve } from 'node:path';
import { defineConfig } from 'vite';
export default defineConfig({
resolve: {
alias: {
lib: resolve('lib'),
src: resolve('src'),
test: resolve('test'),
},
},
build: {
lib: {
entry: resolve(__dirname, 'lib/index.ts'),
name: 'immich-docker',
// the proper extensions will be added
fileName: 'immich-docker',
},
},
});

View File

@@ -1 +1 @@
22.12.0
20.17.0

View File

@@ -69,8 +69,7 @@ However, Immich will delete original files that have been trashed when the trash
### Why do my file names appear as a random string in the file manager?
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names.
To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names. To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
### Can I add my existing photo library?
@@ -83,20 +82,11 @@ Template changes will only apply to _new_ assets. To retroactively apply the tem
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy in front of Immich.
Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to allow large requests.
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, be aware that it enforces a maximum file size of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
This often happens when using a reverse proxy (such as Nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also, check the disk space of your reverse proxy. In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
### Why are some photos stored in the file system with the wrong date?
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job.
The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc.,
the job may not have run automatically the first time.
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc., the job may not have run automatically the first time.
### How can I hide photos from the timeline?
@@ -126,8 +116,7 @@ Also, there are additional jobs for person (face) thumbnails.
### Why do files from WhatsApp not appear with the correct date?
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, only the order of arrival on the device. [See #9116](https://github.com/immich-app/immich/discussions/9116).
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, not the order of arrival on the device. [See #3527](https://github.com/immich-app/immich/issues/3527).
### What happens if an asset exists in more than one account?
@@ -198,7 +187,7 @@ However, when the trash is emptied, the files will re-appear in the main timelin
### How does smart search work?
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
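To make the comparison step concrete, here is a toy sketch of ranking images by cosine similarity against a text embedding. The vectors are placeholders; real CLIP embeddings have hundreds of dimensions and are produced by the model.

```typescript
// Cosine similarity between two embeddings of equal length.
const cosine = (a: number[], b: number[]): number => {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};

const textEmbedding = [0.1, 0.8, 0.2]; // placeholder for the embedded search query
const images = [
  { id: 'beach.jpg', embedding: [0.2, 0.7, 0.1] },
  { id: 'cat.jpg', embedding: [0.9, 0.0, 0.4] },
];

// Most semantically similar images first.
const ranked = [...images].sort(
  (a, b) => cosine(b.embedding, textEmbedding) - cosine(a.embedding, textEmbedding),
);
```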
### How does facial recognition work?
@@ -319,7 +308,7 @@ Do not exaggerate with the job concurrency because you're probably thoroughly ov
### My server shows Server Status Offline | Version Unknown. What can I do?
You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your reverse proxy.
You need to enable WebSockets on your reverse proxy.
---
@@ -344,13 +333,9 @@ You may need to add mount points or docker volumes for the following internal co
- `immich-machine-learning:/.cache`
- `redis:/data`
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
:::note Docker Compose Volumes
The Docker Compose top-level volume element does not support non-root access; all of the above volumes must be local volume mounts.
:::
For a further hardened system, you can add the following block to every container.
For a further hardened system, you can add the following block to every container except for `immich_postgres`.
<details>
<summary>docker-compose.yml</summary>
@@ -399,21 +384,22 @@ If the error says the worker is exiting, then this is normal. This is a feature
There are a few reasons why this can happen.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory.
Consider either increasing the server's RAM or moving the service to a server with more RAM.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory. Consider either increasing the server's RAM or moving the service to a server with more RAM.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible with Immich.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible. This is unlikely to occur on version 1.92.0 or later. Consider upgrading if your version of Immich is below that.
If your version of Immich is below 1.92.0 and the crash occurs after logs about tracing or exporting a model, consider either upgrading or disabling the Tag Objects job.
## Database
### Why am I getting database ownership errors?
If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem.
NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/requirements#special-requirements-for-windows-users) for more details.
NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/environment-variables#supported-filesystems) for more details.
### How can I verify the integrity of my database?
Database checksums are enabled by default for new installations since v1.104.0. You can check if they are enabled by running the following command.
If you installed Immich using v1.104.0 or later, you likely have database checksums enabled by default. You can check this by running the following command.
A result of `on` means that checksums are enabled.
<details>
@@ -429,7 +415,7 @@ docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --
</details>
If checksums are enabled, you can check the status of the database with the following command. A normal result is all `0`s.
If checksums are enabled, you can check the status of the database with the following command. A normal result is all zeroes.
<details>
<summary>Check for database corruption</summary>

View File

@@ -15,21 +15,12 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
:::
The recommended way to back up and restore the Immich database is to use the `pg_dumpall` command. When restoring, you need to delete the `DB_DATA_LOCATION` folder (if it exists) to reset the database.
:::caution
It is not recommended to back up the `DB_DATA_LOCATION` folder directly. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups
Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Restoring
We hope to make restoring simpler in future versions; for now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then follow the steps in the next section to restore the database.
### Manual Backup and Restore
<Tabs>
@@ -43,49 +34,84 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
sleep 10 # Wait for Postgres server to start up
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres))
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | Set-Content -Encoding utf8 "C:\path\to\backup\dump.sql"
```
```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: - 'C:\path\to\backup\dump.sql':/dump.sql) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default
cat "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --username=postgres # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
sleep 10 # Wait for Postgres server to start up
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
</Tabs>
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database.
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).
:::tip
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored.
Some deployment methods make it difficult to start the database without also starting the server or microservices. In these cases, you may set the environmental variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Note that both the server and microservices must have this variable set to prevent the migrations from running. Be sure to remove this variable and restart the services after the database is restored.
:::
### Automatic Database Backups
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
services:
...
backup:
container_name: immich_db_dumper
image: prodrigestivill/postgres-backup-local:14
restart: always
env_file:
- .env
environment:
POSTGRES_HOST: database
POSTGRES_CLUSTER: 'TRUE'
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: ${DB_DATABASE_NAME}
SCHEDULE: "@daily"
POSTGRES_EXTRA_OPTS: '--clean --if-exists'
BACKUP_DIR: /db_dumps
volumes:
- ./db_dumps:/db_dumps
depends_on:
- database
```
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
```
:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.
Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
:::
## Filesystem
@@ -171,7 +197,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:**
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- Stored in `UPLOCAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.

View File

@@ -19,9 +19,3 @@ You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />
## Notification templates
You can override the default notification text with custom templates in HTML format. You can use tags to insert dynamic values into your templates.
<img src={require('./img/user-notifications-templates.png').default} width="80%" title="User notification templates" />

Binary file not shown.


View File

@@ -22,7 +22,7 @@ Copy the entire `immich-server` block as a new service and make the following ch
- container_name: immich_server
...
- ports:
- - 2283:2283
- - 2283:3001
+ immich-microservices:
+ container_name: immich_microservices
```

View File

@@ -11,7 +11,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Authelia](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)

View File

@@ -13,9 +13,9 @@ Running with a pre-existing Postgres server can unlock powerful administrative f
You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
:::note
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. For example, if your Immich version uses the dedicated database image `tensorchord/pgvecto-rs:pg14-v0.2.1`, you must install pgvecto.rs `>= 0.2.1, < 0.3.0`.
:::
## Specifying the connection URL

View File

@@ -1,9 +1,5 @@
# Repair Page
:::warning
This feature is currently disabled and will be reworked in the near future.
:::
The repair page is designed to give information to the system administrator about files that are not tracked, or offline paths.
## Natural State

View File

@@ -40,26 +40,6 @@ server {
}
```
#### Compatibility with Let's Encrypt
In the event that your nginx configuration includes a section for Let's Encrypt, it's likely that you have a segment similar to the following:
```nginx
location ~ /.well-known {
...
}
```
This particular `location` directive can inadvertently prevent mobile clients from reaching the `/.well-known/immich` path, which is crucial for discovery. The usual error message in this case is: "Your app major version is not compatible with the server". To remedy this, you should introduce an additional location block specifically for this path, ensuring that requests are correctly proxied to the Immich server:
```nginx
location = /.well-known/immich {
proxy_pass http://<backend_url>:2283;
}
```
By doing so, you'll maintain the functionality of Let's Encrypt while allowing mobile clients to access the necessary Immich path without obstruction.
### Caddy example config
As an alternative to nginx, you can also use [Caddy](https://caddyserver.com/) as a reverse proxy (with automatic HTTPS configuration). Below is an example config.
@@ -84,43 +64,3 @@ Below is an example config for Apache2 site configuration.
ProxyPreserveHost On
</VirtualHost>
```
### Traefik Proxy example config
The example below is for Traefik version 3.
The most important change is to increase the `respondingTimeouts` of the entrypoint used by Immich, in this example the entrypoint `websecure` for port `443`. By default it is set to 60s, which leads to video uploads failing after 1 minute (error code 499). With this config, uploads will only fail after 10 minutes, which is enough in most cases. Increase it further if needed.
`traefik.yaml`
```yaml
[...]
entryPoints:
websecure:
address: :443
# this section needs to be added
transport:
respondingTimeouts:
readTimeout: 600s
idleTimeout: 600s
writeTimeout: 600s
```
The second part goes in the `docker-compose.yml` file that contains Immich. Add the Traefik-specific labels as in the example.
`docker-compose.yml`
```yaml
services:
immich-server:
[...]
labels:
traefik.enable: true
# increase readingTimeouts for the entrypoint used here
traefik.http.routers.immich.entrypoints: websecure
traefik.http.routers.immich.rule: Host(`immich.your-domain.com`)
traefik.http.services.immich.loadbalancer.server.port: 2283
```
Keep in mind that Traefik needs to be able to reach the network Immich is in; this is usually done by adding the Traefik network to the `immich-server` service.

View File

@@ -7,7 +7,7 @@ If a storage quota has been defined for the user, the usage number will be displ
:::
:::info External library
External libraries are not included in the storage quota.
External library is not included in the storage quota.
:::
<img src={require('./img/server-stats.png').default} title="server statistic" />

View File

@@ -1,49 +0,0 @@
# System Integrity
## Folder checks
:::info
The folders considered for these checks include: `upload/`, `library/`, `thumbs/`, `encoded-video/`, `profile/`, `backups/`
:::
When Immich starts, it performs a series of checks in order to validate that it can read and write files to the volume mounts used by the storage system. If it cannot perform all the required operations, it will fail to start. The checks include:
- Creating an initial hidden file (`.immich`) in each folder
- Reading a hidden file (`.immich`) in each folder
- Overwriting a hidden file (`.immich`) in each folder
The checks are designed to catch the following situations:
- Incorrect permissions (cannot read/write files)
- Missing volume mount (`.immich` files should exist, but are missing)
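Conceptually, each folder check boils down to something like the following simplified sketch (not the actual server code; the base path is an assumption):

```typescript
import { existsSync, readFileSync, writeFileSync } from 'node:fs';
import { join } from 'node:path';

const base = '/usr/src/app/upload'; // assumption: container-internal media location
for (const folder of ['upload', 'library', 'thumbs', 'encoded-video', 'profile', 'backups']) {
  const marker = join(base, folder, '.immich');
  if (!existsSync(marker)) {
    // Either a first start (create the marker) or a missing/changed volume
    // mount; the real check distinguishes these cases and fails on the latter.
    writeFileSync(marker, '');
  }
  readFileSync(marker); // read check
  writeFileSync(marker, ''); // overwrite check
}
```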
### Common issues
:::note
`.immich` files serve as markers and help keep track of volume mounts being used by Immich. Except for the situations listed below, they should never be manually created or deleted.
:::
#### Missing `.immich` files
```
Verifying system mount folder checks (enabled=true)
...
ENOENT: no such file or directory, open 'upload/encoded-video/.immich'
```
The above error messages show that the server has previously (successfully) written `.immich` files to each folder, but now does not detect them. This could be due to any of the following:
- Permission error - unable to read the file, but it exists
- File does not exist - volume mount has changed and should be corrected
- File does not exist - user manually deleted it and should be manually re-created (`touch .immich`)
- File does not exist - user restored from a backup, but did not restore each folder (user should restore all folders or manually create `.immich` in any missing folders)
### Ignoring the checks
:::warning
The checks are designed to catch common problems that we have seen users have in the past, and often indicate there's something wrong that you should solve. If you know what you're doing and you want to disable them you can set the following environment variable:
:::
```
IMMICH_IGNORE_MOUNT_CHECK_ERRORS=true
```

View File

@@ -157,10 +157,6 @@ Immich supports [Reverse Geocoding](/docs/features/reverse-geocoding) using data
SMTP server setup, for user creation notifications, new albums, etc. More information can be found [here](/docs/administration/email-notification)
## Notification Templates
Override the default notifications text with notification templates. More information can be found [here](/docs/administration/email-notification)
## Server Settings
### External Domain
@@ -209,68 +205,4 @@ When this option is enabled the `immich-server` will periodically make requests
## Video Transcoding Settings
The system administrator can configure which video files will be converted to different formats. The settings can be changed in depth, to learn more about the terminology used here, refer to FFmpeg documentation for [H.264](https://trac.ffmpeg.org/wiki/Encode/H.264) codec, [HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) codec and [VP9](https://trac.ffmpeg.org/wiki/Encode/VP9) codec.
Which streams of a video file will be transcoded is determined by the [Transcode Policy](#ffmpeg.transcode). Streams that are transcoded use the following settings (config file name in brackets). Streams that are not transcoded are untouched and preserve their original settings.
### Accepted containers (`ffmpeg.acceptedContainers`) {#ffmpeg.acceptedContainers}
If the video asset's container format is not in this list, it will be remuxed to MP4 even if no streams need to be transcoded.
The default set of accepted container formats is `mov`, `ogg` and `webm`.
### Preset (`ffmpeg.preset`) {#ffmpeg.preset}
The amount of "compute effort" to put into transcoding. These use [the preset names from h264](https://trac.ffmpeg.org/wiki/Encode/H.264#Preset) and will be converted to appropriate values for encoders that configure effort in different ways.
The default value is `ultrafast`.
### Audio codec (`ffmpeg.targetAudioCodec`) {#ffmpeg.targetAudioCodec}
Which audio codec to use when the audio stream is being transcoded. Can be one of `mp3`, `aac`, `libopus`.
The default value is `aac`.
### Video Codec (`ffmpeg.targetVideoCodec`) {#ffmpeg.targetVideoCodec}
Which video codec to use when the video stream is being transcoded. Can be one of `h264`, `hevc`, `vp9` or `av1`.
The default value is `h264`.
### Target resolution (`ffmpeg.targetResolution`) {#ffmpeg.targetResolution}
When transcoding a video stream, downscale the largest dimension to this value while preserving aspect ratio. Videos are never upscaled.
The default value is `720`.
### Transcode policy (`ffmpeg.transcode`) {#ffmpeg.transcode}
The transcoding policy configures which streams of a video asset will be transcoded. The transcoding decision is made independently for video streams and audio streams. This means that if a video stream needs to be transcoded, but an audio stream does not, then the video stream will be transcoded while the audio stream will be copied. If the transcoding policy does not require any stream to be transcoded and does not require the video to be remuxed, then no separate video file will be created.
The default policy is `required`.
#### All videos (`all`) {#ffmpeg.transcode-all}
Videos are always transcoded. This ensures consistency during video playback.
#### Don't transcode any videos (`disabled`) {#ffmpeg.transcode-disabled}
Videos are never transcoded. This saves space and resources on the server, but may prevent playback on devices that don't support the source format (especially web browsers) or result in high bandwidth usage when playing high-bitrate files.
#### Only videos not in an accepted format (`required`) {#ffmpeg.transcode-required}
Video streams are transcoded when any of the following conditions are met:
- The video is HDR.
- The video is not in the yuv420p pixel format.
- The video codec is not in `acceptedVideoCodecs`.
Audio is transcoded if the audio codec is not in `acceptedAudioCodecs`.
#### Videos higher than max bitrate or not in an accepted format (`bitrate`) {#ffmpeg.transcode-bitrate}
In addition to the conditions in `required`, video streams are also transcoded if their bitrate is over `maxBitrate`.
#### Videos higher than target resolution or not in an accepted format (`optimal`) {#ffmpeg.transcode-optimal}
In addition to the conditions in `required`, video streams are also transcoded if the horizontal **and** vertical dimensions are higher than [`targetResolution`](#ffmpeg.targetResolution).
The system administrator can define parameters according to which video files will be converted to different formats (depending on the settings). The settings can be changed in depth, to learn more about the terminology used here, refer to FFmpeg documentation for [H.264](https://trac.ffmpeg.org/wiki/Encode/H.264) codec, [HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) codec and [VP9](https://trac.ffmpeg.org/wiki/Encode/VP9) codec.
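For orientation, the settings above map onto keys under `ffmpeg` in the system configuration. A sketch of a few of them as a TypeScript object (key names follow the headings above; the surrounding structure and value types are assumptions):

```typescript
// Values shown are the documented defaults, except targetVideoCodec,
// which is changed here purely as an example override.
const transcodingSettings = {
  ffmpeg: {
    acceptedContainers: ['mov', 'ogg', 'webm'],
    preset: 'ultrafast',
    targetAudioCodec: 'aac',
    targetVideoCodec: 'hevc', // default is 'h264'
    targetResolution: '720',
    transcode: 'required',
  },
};
```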

View File

@@ -3,7 +3,6 @@ sidebar_position: 1
---
import AppArchitecture from './img/app-architecture.png';
import MobileArchitecture from './img/immich_mobile_architecture.svg';
# Architecture
@@ -29,14 +28,7 @@ All three clients use [OpenAPI](./open-api.md) to auto-generate rest clients for
### Mobile App
The mobile app is written in [Dart](https://dart.dev/) using [Flutter](https://flutter.dev/). Below is an architecture overview:
<MobileArchitecture className="p-4 dark:bg-immich-dark-primary my-4" />
The diagram shows the target architecture; the current state of the codebase does not always follow it yet. New code and contributions should follow this architecture.
Currently, it uses [Isar Database](https://isar.dev/) for a local database and [Riverpod](https://riverpod.dev/) for state management (providers).
Entities and Models are the two types of data classes used. While entities are stored in the on-device database, models are ephemeral and only kept in memory.
The Repositories should be the only place where other data classes are used internally (such as OpenAPI DTOs). However, their interfaces must not use foreign data classes!
The mobile app is written in [Flutter](https://flutter.dev/). It uses [Isar Database](https://isar.dev/) for a local database and [Riverpod](https://riverpod.dev/) for state management.
### Web Client

View File

@@ -15,7 +15,7 @@ Our [GitHub Repository](https://github.com/immich-app/immich) is a [monorepo](ht
| `design/` | Screenshots and logos for the README |
| `docs/` | Source code for the [https://immich.app](https://immich.app) website |
| `machine-learning/` | Source code for the `immich-machine-learning` docker image |
| `misc/release/` | Scripts for version bumps and draft releases |
| `misc/release/` | Scripts for version pumps and draft releases |
| `mobile/` | Source code for the mobile app, both Android and iOS |
| `server/` | Source code for the `immich-server` docker image |
| `web/` | Source code for the `web` |

View File

@@ -1,104 +0,0 @@
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" version="24.7.16">
<diagram name="Page-1" id="Bp2gX--FtC4sSMWxsLrs">
<mxGraphModel dx="1728" dy="954" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" background="none" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="zHhczcy2-Jv_nqmJUiNH-1" value="" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.polygon;polyCoords=[[0.25,0],[0.75,0],[1,0.25],[1,0.75],[0.75,1],[0.25,1],[0,0.75],[0,0.25]];polyline=0;strokeWidth=4;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="280" y="217.5" width="465" height="465" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-2" value="&lt;b&gt;&lt;font style=&quot;font-size: 22px;&quot;&gt;Mobile App&lt;/font&gt;&lt;/b&gt;" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;rounded=1;fontColor=#ffffff;" vertex="1" parent="1">
<mxGeometry x="442.5" y="225" width="140" height="40" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-25" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-4" target="zHhczcy2-Jv_nqmJUiNH-5">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-4" value="Services" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="530" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-26" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-12">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-5" value="Repositories" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#1E83F7;" vertex="1" parent="1">
<mxGeometry x="650" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-24" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-4">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-6" value="Providers" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="410" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-29" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-8">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-30" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-7" value="Pages" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="480" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-31" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.25;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-8" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-8" value="Widgets" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="360" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-11" value="User" style="shape=umlActor;verticalLabelPosition=bottom;verticalAlign=top;html=1;outlineConnect=0;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="180" y="368.5" width="81.5" height="163" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-12" value="platform&lt;div&gt;system&lt;/div&gt;" style="rhombus;whiteSpace=wrap;html=1;rounded=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="800" y="410" width="80" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-13" value="on-device&lt;div&gt;database&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;rounded=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="810" y="310" width="60" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-14" value="server" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;rounded=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="780" y="500" width="120" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-16" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0.07;entryY=0.4;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-14">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-39" value="OpenAPI" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];rounded=1;labelBackgroundColor=#1E83F7;" vertex="1" connectable="0" parent="zHhczcy2-Jv_nqmJUiNH-16">
<mxGeometry x="0.0697" y="1" relative="1" as="geometry">
<mxPoint x="8" y="10" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-23" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-27" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=-15;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-13">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-34" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;dashed=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-3">
<mxGeometry relative="1" as="geometry">
<mxPoint x="810" y="360" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-36" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-37" value="UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="387.5" y="640" width="70" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-38" value="non-UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="550" y="640" width="90" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-41" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" target="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-9" value="Models" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="470" y="510" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-3" value="Entities" style="rounded=1;whiteSpace=wrap;html=1;gradientColor=none;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="472.5" y="330" width="80" height="60" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

File diff suppressed because one or more lines are too long


View File

@@ -1,9 +1,5 @@
# PR Checklist
A minimal devcontainer is supplied with this repository. All commands can be executed directly inside this container to avoid tedious installation of the environment.
:::warning
The provided devcontainer isn't complete at the moment. At a minimum, the dockerized steps in the Makefile (`make dev`, ...) won't work. Feel free to contribute!
:::
When contributing code through a pull request, please check the following:
## Web Checks
@@ -11,7 +7,6 @@ When contributing code through a pull request, please check the following:
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check:svelte` (Type checking via SvelteKit)
- [ ] `npm run check:typescript` (check typescript)
- [ ] `npm test` (unit tests)
## Documentation

View File

@@ -39,16 +39,13 @@ All the services are packaged to run as with single Docker Compose command.
make dev # required Makefile installed on the system.
```
5. Access the dev instance in your browser at http://localhost:3000, or connect via the mobile app.
5. Access the dev instance in your browser at http://localhost:2283, or connect via the mobile app.
All the services will be started with hot-reloading enabled for a quick feedback loop.
You can access the web from `http://your-machine-ip:3000` or `http://localhost:3000` and access the server from the mobile app at `http://your-machine-ip:3000/api`
You can access the web from `http://your-machine-ip:2283` or `http://localhost:2283` and access the server from the mobile app at `http://your-machine-ip:2283/api`
**Notes:**
- The "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors
- In case of a rootless Docker setup, you need to use root within the container; otherwise you will encounter read/write permission errors, see comments in `docker/docker-compose.dev.yml`.
**Note:** the "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors
#### Connect web to a remote backend
@@ -79,7 +76,7 @@ Setting these in the IDE give a better developer experience, auto-formatting cod
### Dart Code Metrics
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/) page for more information on setting up DCM
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/getting-started/#installation) page for more information on setting up DCM
Note: Activating the license is not required.

View File

@@ -1,7 +1,7 @@
# Hardware Transcoding [Experimental]
This feature allows you to use a GPU to accelerate transcoding and reduce CPU load.
Note that hardware transcoding produces significantly larger videos than software transcoding with similar settings, typically with lower quality. Using slow presets and preferring more efficient codecs can narrow this gap.
Note that hardware transcoding is much less efficient for file sizes.
As this is a new feature, it is still experimental and may not work on all systems.
:::info
@@ -23,7 +23,7 @@ You do not need to redo any transcoding jobs after enabling hardware acceleratio
- Raspberry Pi is currently not supported.
- Two-pass mode is only supported for NVENC. Other APIs will ignore this setting.
- By default, only encoding is currently hardware accelerated. This means the CPU is still used for software decoding and tone-mapping.
- You can benefit from end-to-end acceleration by enabling hardware decoding in the video transcoding settings.
- NVENC and RKMPP can be fully accelerated by enabling hardware decoding in the video transcoding settings.
- Hardware dependent
- Codec support varies, but H.264 and HEVC are usually supported.
- Notably, NVIDIA and AMD GPUs do not support VP9 encoding.
@@ -49,7 +49,7 @@ For RKMPP to work:
- You must have a supported Rockchip ARM SoC.
- Only RK3588 supports hardware tonemapping, other SoCs use slower software tonemapping while still using hardware encoding.
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install the [`libmali`][libmali-rockchip] release that corresponds to your Mali GPU (`libmali-valhall-g610-g13p0-gbm` on RK3588) and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install [`libmali-valhall-g610-g6p0-gbm`][libmali-rockchip] and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- under `rkmpp` uncomment the 3 lines required for OpenCL tonemapping by removing the `#` symbol at the beginning of each line
- `- /dev/mali0:/dev/mali0`
- `- /etc/OpenCL:/etc/OpenCL:ro`
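For illustration, after uncommenting, the relevant fragment of the `rkmpp` service should look roughly like this (a sketch; the service's other `devices` entries are omitted, and the third mount maps the `libmali` library mentioned above):

```yaml
rkmpp:
  devices:
    - /dev/mali0:/dev/mali0 # other device mappings omitted for brevity
  volumes:
    - /etc/OpenCL:/etc/OpenCL:ro
    - /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro
```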
@@ -62,14 +62,11 @@ For RKMPP to work:
1. If you do not already have it, download the latest [`hwaccel.transcoding.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-server`, uncomment the `extends` section and change `cpu` to the appropriate backend.
Note: For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
- For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
3. Redeploy the `immich-server` container with these updated settings.
4. In the Admin page under `Video transcoding settings`, change the hardware acceleration setting to the appropriate option and save.
Note: For Jasper Lake and Elkhart Lake CPUs, you will need to set the `Hardware Acceleration` -> `Constant quality mode` to `CQP`
5. (Optional) Enable hardware decoding for optimal performance.
5. (Optional) If using a compatible backend, you may enable hardware decoding for optimal performance.
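As a sketch of step 2, the uncommented `extends` section ends up looking like this (the service name depends on your hardware):

```yaml
immich-server:
  extends:
    file: hwaccel.transcoding.yml
    service: quicksync # one of: nvenc, quicksync, rkmpp, vaapi, vaapi-wsl
  # ... rest of the service definition unchanged
```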
#### Single Compose File
@@ -92,7 +89,16 @@ immich-server:
devices:
- /dev/dri:/dev/dri
volumes:
...
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- 2283:3001
depends_on:
- redis
- database
restart: always
```
Once this is done, you can continue to step 3 of "Basic Setup".

Binary file not shown (Before: 1.5 MiB)

Binary file not shown (Before: 379 KiB)

Binary file not shown (Before: 2.1 MiB)

View File

@@ -1,14 +1,18 @@
# External Libraries
# Libraries
External libraries track assets stored in the filesystem outside of Immich. When the external library is scanned, Immich will load videos and photos from disk and create the corresponding assets. These assets will then be shown in the main timeline, and they will look and behave like any other asset, including viewing on the map, adding to albums, etc. Later, if a file is modified outside of Immich, you need to scan the library for the changes to show up.
## Overview
If an external asset is deleted from disk, Immich will move it to trash on rescan. To restore the asset, you need to restore the original file. After 30 days the file will be removed from trash, and any changes to metadata within Immich will be lost.
Immich supports the creation of libraries, which are top-level asset containers. Currently, there are two types of libraries: traditional upload libraries that can sync with a mobile device, and external libraries that keep up to date with files on disk. Libraries differ from albums in that an asset can belong to multiple albums but only one library, and deleting a library deletes all assets contained within. As of August 2023, this is a new feature and libraries have a lot of potential for future development beyond what is documented here. This document attempts to describe the current state of libraries.
:::caution
## External Libraries
If you add metadata to an external asset in any way (i.e. add it to an album or edit the description), that metadata is only stored inside Immich and will not be persisted to the external asset file. If you move an asset to another location within the library all such metadata will be lost upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
External libraries track assets stored outside of Immich, i.e. in the file system. When an external library is scanned, Immich will read the metadata from each image or video file and create a corresponding asset in the library. These items will then be shown in the main timeline, and they will look and behave like any other asset, including viewing on the map, adding to albums, etc.
:::
If a file is modified outside of Immich, the changes will not be reflected in Immich until the library is scanned again. There are different ways to scan a library depending on the use case:
- Scan Library Files: This is the default scan method and also the quickest. It will scan all files in the library and add new files to the library. It will notice if any files are missing (see below) but not check existing assets
- Scan All Library Files: Same as above, but will check each existing asset to see if the modification time has changed. If it has, the asset will be updated. Since it has to check each asset, this is slower than Scan Library Files.
- Force Scan All Library Files: Same as above, but will read each asset from disk no matter the modification time. This is useful in some cases where an asset has been modified externally but the modification time has not changed. This is the slowest way to scan because it reads each asset from disk.
:::caution
@@ -16,6 +20,22 @@ Due to aggressive caching it can take some time for a refreshed asset to appear
:::
In external libraries, the file path is used for duplicate detection. This means that if a file is moved to a different location, it will be added as a new asset. If the file is moved back to its original location, it will be added as a new asset. In contrast to upload libraries, two identical files can be uploaded if they are in different locations. This is a deliberate design choice to make Immich reflect the file system as closely as possible. Remember that duplication detection is only done within the same library, so if you have multiple external libraries, the same file can be added to multiple libraries.
:::caution
If you add assets from an external library to an album and then move the asset to another location within the library, the asset will be removed from the album upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
:::
### Deleted External Assets
Note: Either a manual or scheduled library scan must have been performed to identify offline assets before this process will work.
In all above scan methods, Immich will check if any files are missing. This can happen if files are deleted, or if they are on a storage location that is currently unavailable, like a network drive that is not mounted, or a USB drive that has been unplugged. In order to prevent accidental deletion of assets, Immich will not immediately delete an asset from the library if the file is missing. Instead, the asset will be internally marked as offline and will still be visible in the main timeline. If the file is moved back to its original location and the library is scanned again, the asset will be restored.
Finally, files can be deleted from Immich via the `Remove Offline Files` job. This job can be found via the three-dots menu for the associated external library configured under Administration > Libraries (the same location described at [create external libraries](#create-external-libraries)). When this job is run, any assets marked as offline will then be removed from Immich. Run this job whenever files have been deleted from the file system and you want to remove them from Immich.
### Import Paths
External libraries use import paths to determine which files to scan. Each library can have multiple import paths so that files from different locations can be added to the same library. Import paths are scanned recursively, and if a file is in multiple import paths, it will only be added once. Each import path must be a readable directory that exists on the filesystem; the import path dialog will alert you of any paths that are not accessible.
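Note that import paths are resolved inside the container, so the host folder must be mounted there first. A minimal sketch (the host path is an example):

```yaml
# docker-compose.yml fragment: expose a host folder to the server
# so it can be used as an import path
services:
  immich-server:
    volumes:
      - /mnt/media/christmas-trip:/mnt/media/christmas-trip:ro
```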
@@ -46,13 +66,9 @@ Some basic examples:
- `**/Raw/**` will exclude all files in any directory named `Raw`
- `**/*.{tif,jpg}` will exclude all files with the extension `.tif` or `.jpg`
Special characters such as @ should be escaped, for instance:
- `**/\@eadir/**` will exclude all files in any directory named `@eadir`
### Automatic watching (EXPERIMENTAL)
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan.
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan. Deleted assets are, as always, marked as offline and can be removed with the "Remove offline files" button.
If your photos are on a network drive, automatic file watching likely won't work. In that case, you will have to rely on a periodic library refresh to pull in your changes.
@@ -68,7 +84,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
There is an automatic job that's run once a day and refreshes all modified files in all libraries as well as cleans up any libraries stuck in deletion.
## Usage
@@ -104,7 +120,7 @@ This will disallow the images from being deleted in the web UI, or adding metada
_Remember to run `docker compose up -d` to register the changes. Make sure you can see the mounted path in the container._
:::
### Create A New Library
### Create External Libraries
These actions must be performed by the Immich administrator.
@@ -128,7 +144,7 @@ Next, we'll add an exclusion pattern to filter out raw files.
- Enter `**/Raw/**` and click save.
- Click save
- Click the drop-down menu on the newly created library
- Click on Scan
- Click on Scan Library Files
The christmas trip library will now be scanned in the background. In the meantime, let's add the videos and old photos to another library.
@@ -145,26 +161,10 @@ If you get an error here, please rename the other external library to something
- Click on Add Path
- Enter `/mnt/media/videos` then click Add
- Click Save
- Click on Scan
- Click on Scan Library Files
Within seconds, the assets from the old-pics and videos folders should show up in the main timeline.
### Folder view
:::info
This feature also exists for assets uploaded other than through external libraries.
:::
:::tip
You can use the storage template migration feature for the best experience with uploaded assets in this view.
:::
You can browse your photos and videos by folder like in a file explorer.
Enable this feature from the Users Settings > Features > Folders.
The UI is currently only available for the web; mobile will come in a subsequent release.
<img src={require('./img/folder-view.png').default} width="75%" title='Folder-view' />
### Set Custom Scan Interval
:::note

View File

@@ -53,12 +53,6 @@ You do not need to redo any machine learning jobs after enabling hardware accele
3. Still in `immich-machine-learning`, add one of -[armnn, cuda, openvino] to the `image` section's tag at the end of the line.
4. Redeploy the `immich-machine-learning` container with these updated settings.
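For example, with the CUDA backend, steps 2-3 would leave the service looking roughly like this (a sketch; substitute `armnn` or `openvino` for the other backends):

```yaml
immich-machine-learning:
  image: ghcr.io/immich-app/immich-machine-learning:release-cuda
  extends:
    file: hwaccel.ml.yml
    service: cuda
```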
### Confirming Device Usage
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel and `intel_gpu_top` for Intel.
You can also check the logs of the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, or when you search with text in Immich, you should either see a log for `Available ORT providers` containing the relevant provider (e.g. `CUDAExecutionProvider` in the case of CUDA), or a `Loaded ANN model` log entry without errors in the case of ARM NN.
#### Single Compose File
Some platforms, including Unraid and Portainer, do not support multiple Compose files as of writing. As an alternative, you can "inline" the relevant contents of the [`hwaccel.ml.yml`][hw-file] file into the `immich-machine-learning` service directly.
@@ -101,22 +95,9 @@ immich-machine-learning:
Once this is done, you can redeploy the `immich-machine-learning` container.
#### Multi-GPU
If you want to utilize multiple NVIDIA or Intel GPUs, you can set the `MACHINE_LEARNING_DEVICE_IDS` environmental variable to a comma-separated list of device IDs and set `MACHINE_LEARNING_WORKERS` to the number of listed devices. You can run a command such as `nvidia-smi -L` or `glxinfo -B` to see the currently available devices and their corresponding IDs.
For example, if you have devices 0 and 1, set the values as follows:
```
MACHINE_LEARNING_DEVICE_IDS=0,1
MACHINE_LEARNING_WORKERS=2
```
In this example, the machine learning service will spawn two workers, one of which will allocate models to device 0 and the other to device 1. Different requests will be processed by one worker or the other.
This approach can be used to simply specify a particular device as well. For example, setting `MACHINE_LEARNING_DEVICE_IDS=1` will ensure device 1 is always used instead of device 0.
Note that you should increase job concurrencies to increase overall utilization and more effectively distribute work across multiple GPUs. Additionally, each GPU must be able to load all models. It is not possible to distribute a single model to multiple GPUs that individually have insufficient VRAM, or to delegate a specific model to one GPU.
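If you prefer to keep these settings in `docker-compose.yml` rather than `.env`, the equivalent fragment might look like this (a sketch):

```yaml
immich-machine-learning:
  environment:
    MACHINE_LEARNING_DEVICE_IDS: "0,1" # models are assigned to devices 0 and 1 round-robin
    MACHINE_LEARNING_WORKERS: "2"      # one worker per listed device
```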
:::info
You can confirm the device is being recognized and used by checking its utilization (via `nvtop` for CUDA, `intel_gpu_top` for OpenVINO, etc.). You can also enable debug logging by setting `IMMICH_LOG_LEVEL=debug` in the `.env` file and restarting the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, you should see a log for `Available ORT providers` containing the relevant provider. In the case of ARM NN, the absence of a `Could not load ANN shared libraries` log entry means it loaded successfully.
:::
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

View File

@@ -1,9 +1,6 @@
import Icon from '@mdi/react';
import { mdiCloudOffOutline, mdiCloudCheckOutline } from '@mdi/js';
import MobileAppDownload from '/docs/partials/_mobile-app-download.md';
import MobileAppLogin from '/docs/partials/_mobile-app-login.md';
import MobileAppBackup from '/docs/partials/_mobile-app-backup.md';
import { cloudDonePath, cloudOffPath } from '@site/src/components/svg-paths';
# Mobile App
@@ -30,63 +27,3 @@ The beta release channel allows users to test upcoming changes before they are o
:::info
You can enable automatic backup on supported devices. For more information see [Automatic Backup](/docs/features/automatic-backup.md).
:::
## Sync only selected photos
If you have a large number of photos on the device and would prefer not to back them all up, it might be prudent to back up only selected photos from the device to the Immich server.
First, you need to enable the Storage Indicator in your app's settings. Navigate to **<ins>Settings -> Photo Grid</ins>** and enable **"Show Storage indicator on asset tiles"**; this makes it easy to distinguish local-only assets and synced assets.
:::note
This will enable a small cloud icon on the bottom right corner of the asset tile, indicating that the asset is synced to the server:
1. <Icon path={mdiCloudOffOutline} size={1} /> - Local-only asset; not synced to the server
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server
:::
Now make sure that the local album is selected in the backup screen (steps 1-2 above). You can find these albums listed in **<ins>Library -> On this device</ins>**. To selectively upload photos from these albums, simply select the local-only photos and tap the "Upload" button in the dynamic bottom menu.
<img
src={require('./img/mobile-upload-open-photo.png').default}
width="50%"
title="Upload button on local asset preview"
/>
<img
src={require('./img/mobile-upload-selected-photos.png').default}
width="40%"
title="Upload button after photos selection"
/>
## Album Sync
You can sync or mirror an album from your phone to the Immich server on your account. For example, if you select the Recents, Camera, and Videos albums for backup, corresponding albums with the same names will be created on the server. Once the assets from those albums are uploaded, they will be put into the target albums automatically.
### Album Synchronization Highlights
- **One-Way Sync:** Synchronization is one-way, from the device to the server.
- **Name Matching:** If an album on the server has the same name as the album on the device, images from the device will be merged with the existing images in the server album.
- **Shared Albums:** If the matching album on the server is shared, the new photos merged into the album will also be shared.
- **Album Structure:** When an album is created for the first time, its structure is based on the initial state. Future updates made on the phone (such as deleting or repositioning photos) will not be reflected in Immich.
- **User-Specific Sync:** Album synchronization is unique to each server user and does not sync between different users or partners.
- **Mobile-Only Feature:** Album synchronization is currently only available on mobile. For similar options on a computer, refer to [Libraries](/docs/features/libraries) for further details.
### Synchronizing albums from the past
Albums can be synchronized to the server even if they did not exist on the server before. In order to apply this setting you have to:
Tap the cloud icon in the top right -> the cog wheel in the top right -> select the sync option under Sync albums.
:::info Sync albums delete/move photos
If you delete or move photos in the local album on your device, the change will not be reflected in the album on the server, **even if** you click Sync albums.
Only added files are reflected.
:::
If the same asset is in more than one album, it will only sync to the first album it's in; after that, it won't sync again even if the user clicks Sync albums manually.
To overcome this limitation, the files must be removed from the blacklist via
App settings -> Advanced -> Duplicate Assets -> Clear
:::info
Clearing duplicate assets from the list will cause all previously uploaded duplicate files to be re-attempted; the files will not actually be uploaded (they are rejected on the server side as duplicates), but they will be synchronized to the album and added back to the blacklist at the end of the synchronization.
:::

View File

@@ -25,10 +25,10 @@ The metrics in immich are grouped into API (endpoint calls and response times),
### Configuration
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_TELEMETRY_INCLUDE=all` environmental variable to your `.env` file. Note that only the server container currently uses this variable.
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_METRICS=true` environmental variable to your `.env` file. Note that only the server and microservices containers currently use this variable.
:::tip
`IMMICH_TELEMETRY_INCLUDE=all` enables all metrics. For a more granular configuration you can enumerate the telemetry metrics that should be included as a comma separated list (e.g. `IMMICH_TELEMETRY_INCLUDE=repo,api`). Alternatively, you can also exclude specific metrics with `IMMICH_TELEMETRY_EXCLUDE`. For more information refer to the [environment section](/docs/install/environment-variables.md#prometheus).
`IMMICH_METRICS` enables all metrics, but there are also [environmental variables](/docs/install/environment-variables.md#prometheus) to toggle specific metric groups. If you'd like to only expose certain kinds of metrics, you can set only those environmental variables to `true`. Explicitly setting the environmental variable for a metric group overrides `IMMICH_METRICS` for that group. For example, setting `IMMICH_METRICS=true` and `IMMICH_API_METRICS=false` will enable all metrics except API metrics.
:::
The next step is to configure a new or existing Prometheus instance to scrape this endpoint. The following steps assume that you do not have an existing Prometheus instance, but the steps will be similar either way.
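As a starting point, a minimal `prometheus.yml` scrape job might look like this (a sketch: the hostname is an assumption, and 8081/8082 are the default API and microservices metrics ports):

```yaml
scrape_configs:
  - job_name: immich
    static_configs:
      - targets:
          - immich-server:8081 # API metrics
          - immich-server:8082 # microservices metrics
```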

View File

@@ -1,15 +1,15 @@
# Files Custom Locations
This guide explains how to store generated and raw files with docker's volume mount in different locations.
This guide explains storing generated and raw files with docker's volume mount in different locations.
:::caution Backup
If you use automatic backup tools, remember to update their settings after following this guide so that the new paths, especially `profile/`, are backed up.
:::
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server in the future
```diff title=".env"
# You can find documentation for all the supported environment variables [here](/docs/install/environment-variables)
# You can find documentation for all the supported env variables [here](/docs/install/environment-variables)
# Custom location where your uploaded, thumbnails, and transcoded video files are stored
- UPLOAD_LOCATION=./library
@@ -17,11 +17,10 @@ In our `.env` file, we will define variables that will help us in the future whe
+ THUMB_LOCATION=/custom/path/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/path/immich/encoded-video
+ PROFILE_LOCATION=/custom/path/immich/profile
+ BACKUP_LOCATION=/custom/path/immich/backups
...
```
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
```diff title="docker-compose.yml"
services:
@@ -31,7 +30,6 @@ services:
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
+ - ${BACKUP_LOCATION}:/usr/src/app/upload/backups
- /etc/localtime:/etc/localtime:ro
```
@@ -43,11 +41,12 @@ docker compose up -d
:::note
Because of the underlying properties of docker bind mounts, it is not recommended to mount the `upload/` and `library/` folders as separate bind mounts if they are on the same device.
For this reason, we mount the HDD or the network storage (NAS) to `/usr/src/app/upload` and then mount the folders we want to access under that folder.
For this reason, we mount the HDD or network storage to `/usr/src/app/upload` and then mount the folders we want quick access to below this folder.
The `thumbs/` folder contains both the small thumbnails displayed in the timeline and the larger previews shown when clicking into an image. These cannot be separated.
The `thumbs/` folder contains both the small thumbnails shown in the timeline, and the larger previews shown when clicking into an image. These cannot be split up.
The storage metrics of the Immich server will track available storage at `UPLOAD_LOCATION`, so the administrator must set up some sort of monitoring to ensure the storage does not run out of space. The `profile/` folder is much smaller, usually less than 1 MB.
The storage metrics of the Immich server will track the storage available at `UPLOAD_LOCATION`,
so the administrator should setup some kind of monitoring to make sure the SSD does not run out of space. The `profile/` folder is much smaller, typically less than 1 MB.
:::
Thanks to [Jrasm91](https://github.com/immich-app/immich/discussions/2110#discussioncomment-5477767) for writing the guide.

View File

@@ -98,10 +98,6 @@ SELECT * FROM "move_history";
SELECT * FROM "users";
```
```sql title="Get owner info from asset ID"
SELECT "users".* FROM "users" JOIN "assets" ON "users"."id" = "assets"."ownerId" WHERE "assets"."id" = 'fa310b01-2f26-4b7a-9042-d578226e021f';
```
## System Config
```sql title="Custom settings"

View File

@@ -27,7 +27,7 @@ You may use a VPN service to open an encrypted connection to your Immich instanc
If you are unable to open a port on your router for Wireguard or OpenVPN to your server, [Tailscale](https://tailscale.com/) is a good option. Tailscale mediates a peer-to-peer wireguard tunnel between your server and remote device, even if one or both of them are behind a [NAT firewall](https://en.wikipedia.org/wiki/Network_address_translation).
:::tip Video tutorial
:::tip Video toturial
You can learn how to set up Tailscale together with Immich with the [tutorial video](https://www.youtube.com/watch?v=Vt4PDUXB_fg) they created.
:::

View File

@@ -1,20 +1,18 @@
# Remote Machine Learning
To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine learning container on a more powerful system, such as your laptop or desktop computer. The server container will send requests containing the image preview to the remote machine learning container for processing. The machine learning container does not persist this data or associate it with a particular user.
To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine-learning container on a more powerful system (e.g. your laptop or desktop computer):
- Set the URL in Machine Learning Settings on the Admin Settings page to point to the designated ML system, e.g. `http://workstation:3003`.
- Copy the following `docker-compose.yml` to your ML system.
- If using [hardware acceleration](/docs/features/ml-hardware-acceleration), the [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be added
- Start the container by running `docker compose up -d`.
:::info
Smart Search and Face Detection will use this feature, but Facial Recognition will not. This is because Facial Recognition uses the _outputs_ of these models that have already been saved to the database. As such, its processing is between the server container and the database.
Smart Search and Face Detection will use this feature, but Facial Recognition is handled in the server.
:::
:::danger
Image previews are sent to the remote machine learning container. Use this option carefully when running this on a public computer or a paid processing cloud. Additionally, as an internal service, the machine learning container has no security measures whatsoever. Please be mindful of where it's deployed and who can access it.
:::
1. Ensure the remote server has Docker installed
2. Copy the following `docker-compose.yml` to the remote server
:::info
If using hardware acceleration, the [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be added and the `docker-compose.yml` needs to be configured as described in the [hardware acceleration documentation](/docs/features/ml-hardware-acceleration)
When using remote machine learning, the thumbnails are sent to the remote machine learning container. Use this option carefully when running this on a public computer or a paid processing cloud.
:::
```yaml
@@ -39,26 +37,8 @@ volumes:
model-cache:
```
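For reference, the downloadable compose file has roughly this shape (a sketch reconstructed around the `model-cache` volume shown above; consult the linked file for the authoritative version):

```yaml
name: immich_remote_ml
services:
  immich-machine-learning:
    container_name: immich_machine_learning
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    volumes:
      - model-cache:/cache
    restart: always
    ports:
      - 3003:3003
volumes:
  model-cache:
```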
3. Start the remote machine learning container by running `docker compose up -d`
Please note that version mismatches between both hosts may cause instabilities and bugs, so make sure to always perform updates together.
:::info
Version mismatches between both hosts may cause bugs and instability, so remember to update this container as well when updating the local Immich instance.
:::
4. Navigate to the [Machine Learning Settings](https://my.immich.app/admin/system-settings?isOpen=machine-learning)
5. Click _Add URL_
6. Fill the new field with the URL to the remote machine learning container, e.g. `http://ip:port`
## Forcing remote processing
Adding a new URL to the settings is recommended over replacing the existing URL. This is because it will allow machine learning tasks to be processed successfully when the remote server is down by falling back to the local machine learning container. If you do not want machine learning tasks to be processed locally when the remote server is not available, you can instead replace the existing URL and only provide the remote container's URL. If doing this, you can remove the `immich-machine-learning` section of the local `docker-compose.yml` file to save resources, as this service will never be used.
Do note that this will mean that Smart Search and Face Detection jobs will fail to be processed when the remote instance is not available. This in turn means that tasks dependent on these features—Duplicate Detection and Facial Recognition—will not run for affected assets. If this occurs, you must manually click the _Missing_ button next to Smart Search and Face Detection in the [Job Status](http://my.immich.app/admin/jobs-status) page for the jobs to be retried.
## Load balancing
While several URLs can be provided in the settings, they are tried sequentially; there is no attempt to distribute load across multiple containers. It is recommended to use a dedicated load balancer for such use-cases and specify it as the only URL. Among other things, it may enable the use of different APIs on the same server by running multiple containers with different configurations. For example, one might run an OpenVINO container in addition to a CUDA container, or run a standard release container to maximize both CPU and GPU utilization.
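As a sketch of that last idea, two differently accelerated containers could be exposed on separate ports and placed behind the load balancer (the image tags are real release tags; the service names and host ports are arbitrary):

```yaml
services:
  ml-cuda:
    image: ghcr.io/immich-app/immich-machine-learning:release-cuda
    extends:
      file: hwaccel.ml.yml
      service: cuda
    ports:
      - 3003:3003
  ml-openvino:
    image: ghcr.io/immich-app/immich-machine-learning:release-openvino
    extends:
      file: hwaccel.ml.yml
      service: openvino
    ports:
      - 3004:3003
```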
:::tip
The machine learning container can be shared among several Immich instances regardless of the models a particular instance uses. However, using different models will lead to higher peak memory usage.
:::caution
As an internal service, the machine learning container has no security measures whatsoever. Please be mindful of where it's deployed and who can access it.
:::

View File

@@ -6,19 +6,10 @@ This script assumes you have a second hard drive connected to your server for on
The database is saved to your Immich upload folder in the `database-backup` subdirectory. The database is then backed up and versioned with your assets by Borg. This ensures that the database backup is in sync with your assets in every snapshot.
:::info
This script makes backups of your database along with your photo/video library. This is redundant with the [automatic database backup tool](https://immich.app/docs/administration/backup-and-restore#automatic-database-backups) built into Immich. Using this script to backup your database has two advantages over the built-in backup tool:
- This script uses storage more efficiently by versioning your backups instead of making multiple copies.
- The database backups are performed at the same time as the library backup, ensuring that the backups of your database and the library are always in sync.
If you are using this script, it is therefore safe to turn off the built-in automatic database backups from your admin panel to save storage space.
:::
### Prerequisites
- Borg needs to be installed on your server as well as the remote machine. You can find instructions to install Borg [here](https://borgbackup.readthedocs.io/en/latest/installation.html).
- (Optional) To run this script as a non-root user, you should [add your username to the docker group](https://docs.docker.com/engine/install/linux-postinstall/).
- (Optional) To run this sript as a non-root user, you should [add your username to the docker group](https://docs.docker.com/engine/install/linux-postinstall/).
- To run this script non-interactively, set up [passwordless ssh](https://www.redhat.com/sysadmin/passwordless-ssh) to your remote machine from your server. If you skipped the previous step, make sure this step is done from your root account.
To initialize the borg repository, run the following commands once.

View File

@@ -19,28 +19,20 @@ The default configuration looks like this:
"targetVideoCodec": "h264",
"acceptedVideoCodecs": ["h264"],
"targetAudioCodec": "aac",
"acceptedAudioCodecs": ["aac", "mp3", "libopus", "pcm_s16le"],
"acceptedContainers": ["mov", "ogg", "webm"],
"acceptedAudioCodecs": ["aac", "mp3", "libopus"],
"targetResolution": "720",
"maxBitrate": "0",
"bframes": -1,
"refs": 0,
"gopSize": 0,
"npl": 0,
"temporalAQ": false,
"cqMode": "auto",
"twoPass": false,
"preferredHwDevice": "auto",
"transcode": "required",
"tonemap": "hable",
"accel": "disabled",
"accelDecode": false
},
"backup": {
"database": {
"enabled": true,
"cronExpression": "0 02 * * *",
"keepLastAmount": 14
}
"accel": "disabled"
},
"job": {
"backgroundTask": {
@@ -68,13 +60,10 @@ The default configuration looks like this:
"concurrency": 5
},
"thumbnailGeneration": {
"concurrency": 3
"concurrency": 5
},
"videoConversion": {
"concurrency": 1
},
"notifications": {
"concurrency": 5
}
},
"logging": {
@@ -83,52 +72,46 @@ The default configuration looks like this:
},
"machineLearning": {
"enabled": true,
"urls": ["http://immich-machine-learning:3003"],
"url": "http://immich-machine-learning:3003",
"clip": {
"enabled": true,
"modelName": "ViT-B-32__openai"
},
"duplicateDetection": {
"enabled": true,
"maxDistance": 0.01
"enabled": false,
"maxDistance": 0.03
},
"facialRecognition": {
"enabled": true,
"modelName": "buffalo_l",
"minScore": 0.7,
"maxDistance": 0.5,
"maxDistance": 0.6,
"minFaces": 3
}
},
"map": {
"enabled": true,
"lightStyle": "https://tiles.immich.cloud/v1/style/light.json",
"darkStyle": "https://tiles.immich.cloud/v1/style/dark.json"
"lightStyle": "",
"darkStyle": ""
},
"reverseGeocoding": {
"enabled": true
},
"metadata": {
"faces": {
"import": false
}
},
"oauth": {
"autoLaunch": false,
"autoRegister": true,
"buttonText": "Login with OAuth",
"clientId": "",
"clientSecret": "",
"defaultStorageQuota": 0,
"enabled": false,
"issuerUrl": "",
"mobileOverrideEnabled": false,
"mobileRedirectUri": "",
"clientId": "",
"clientSecret": "",
"scope": "openid email profile",
"signingAlgorithm": "RS256",
"profileSigningAlgorithm": "none",
"storageLabelClaim": "preferred_username",
"storageQuotaClaim": "immich_quota"
"storageQuotaClaim": "immich_quota",
"defaultStorageQuota": 0,
"buttonText": "Login with OAuth",
"autoRegister": true,
"autoLaunch": false,
"mobileOverrideEnabled": false,
"mobileRedirectUri": ""
},
"passwordLogin": {
"enabled": true
@@ -139,16 +122,11 @@ The default configuration looks like this:
"template": "{{y}}/{{y}}-{{MM}}-{{dd}}/{{filename}}"
},
"image": {
"thumbnail": {
"format": "webp",
"size": 250,
"quality": 80
},
"preview": {
"format": "jpeg",
"size": 1440,
"quality": 80
},
"thumbnailFormat": "webp",
"thumbnailSize": 250,
"previewFormat": "jpeg",
"previewSize": 1440,
"quality": 80,
"colorspace": "p3",
"extractEmbedded": false
},
@@ -162,35 +140,23 @@ The default configuration looks like this:
"theme": {
"customCss": ""
},
"user": {
"deleteDelay": 7
},
"library": {
"scan": {
"enabled": true,
"cronExpression": "0 0 * * *"
},
"watch": {
"enabled": false
"enabled": false,
"usePolling": false,
"interval": 10000
}
},
"server": {
"externalDomain": "",
"loginPageMessage": ""
},
"notifications": {
"smtp": {
"enabled": false,
"from": "",
"replyTo": "",
"transport": {
"ignoreCert": false,
"host": "",
"port": 587,
"username": "",
"password": ""
}
}
},
"user": {
"deleteDelay": 7
}
}
```

View File

@@ -7,9 +7,10 @@ import ExampleEnv from '!!raw-loader!../../../docker/example.env';
# Docker Compose [Recommended]
Docker Compose is the recommended method to run Immich in production. Below are the steps to deploy Immich with Docker Compose.
Docker Compose is the recommended method to run Immich in production. Below are the steps to deploy Immich with Docker Compose.
Immich requires Docker Compose version 2.x.
## Step 1 - Download the required files
### Step 1 - Download the required files
Create a directory of your choice (e.g. `./immich-app`) to hold the `docker-compose.yml` and `.env` files.
@@ -18,7 +19,7 @@ mkdir ./immich-app
cd ./immich-app
```
Download [`docker-compose.yml`][compose-file] and [`example.env`][env-file] by running the following commands:
Download [`docker-compose.yml`][compose-file] and [`example.env`][env-file], either by running the following commands:
```bash title="Get docker-compose.yml file"
wget -O docker-compose.yml https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
@@ -28,11 +29,6 @@ wget -O docker-compose.yml https://github.com/immich-app/immich/releases/latest/
wget -O .env https://github.com/immich-app/immich/releases/latest/download/example.env
```
You can alternatively download these two files from your browser and move them to the directory that you created, in which case ensure that you rename `example.env` to `.env`.
:::info Optional Features
If you intend to use hardware acceleration for transcoding or machine learning (ML), you can download the config files you'll need now, in the same way:
```bash title="(Optional) Get hwaccel.transcoding.yml file"
wget -O hwaccel.transcoding.yml https://github.com/immich-app/immich/releases/latest/download/hwaccel.transcoding.yml
```
@@ -41,9 +37,15 @@ wget -O hwaccel.transcoding.yml https://github.com/immich-app/immich/releases/la
wget -O hwaccel.ml.yml https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml
```
or by downloading from your browser and moving the files to the directory that you created.
Note: If you downloaded the files from your browser, also ensure that you rename `example.env` to `.env`.
:::info
Optionally, you can enable hardware acceleration for machine learning and transcoding. See the [Hardware Transcoding](/docs/features/hardware-transcoding.md) and [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md) guides for info on how to set these up.
:::
## Step 2 - Populate the .env file with custom values
### Step 2 - Populate the .env file with custom values
<details>
<summary>
@@ -52,37 +54,30 @@ wget -O hwaccel.ml.yml https://github.com/immich-app/immich/releases/latest/down
<CodeBlock language="bash">{ExampleEnv}</CodeBlock>
</details>
- Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets. It should be a new directory on the server with enough free space.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publically exposed, so this password is only used for local authentication.
To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`. `pwgen` is a handy utility for this.
- Set your timezone by uncommenting the `TZ=` line.
- Populate custom database information if necessary.
- Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publically exposed, so this password is only used for local authentication.
To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`.
:::info Optional Features
You can edit `docker-compose.yml` to add external libraries or enable hardware acceleration now by following [their guides](#setting-up-optional-features).
:::
### Step 3 - Start the containers
## Step 3 - Start the containers
From the directory you created in Step 1 (which should now contain your customized `docker-compose.yml` and `.env` files), run this command:
From the directory you created in Step 1, (which should now contain your customized `docker-compose.yml` and `.env` files) run `docker compose up -d`.
```bash title="Start the containers using docker compose command"
docker compose up -d
```
This starts Immich as a background service (per the `-d` flag), ensuring it restarts after system reboots or crashes (per the `restart` fields in `docker-compose.yml`).
:::info Docker version
If you get an error `unknown shorthand flag: 'd' in -d`, you are probably running the wrong Docker version. (This happens, for example, with the docker.io package in Ubuntu 22.04.3 LTS.) You can correct the problem by following the complete [Docker Engine install](https://docs.docker.com/engine/install/) procedure for your distribution, crucially the "Uninstall old versions" and "Install using the apt/rpm repository" sections. These replace the distro's Docker packages with Docker's official ones.
If you get an error `unknown shorthand flag: 'd' in -d`, you are probably running the wrong Docker version. (This happens, for example, with the docker.io package in Ubuntu 22.04.3 LTS.) You can correct the problem by `apt remove`ing Ubuntu's docker.io package and installing docker and docker-compose via [Docker's official repository][docker-repo].
Note that the correct command really is `docker compose`, not `docker-compose`. If you try the latter on vanilla Ubuntu 22.04, it will fail in a different way:
Note that the correct command really is `docker compose`, not `docker-compose`. If you try the latter on vanilla Ubuntu 22.04 it will fail in a different way:
```
The Compose file './docker-compose.yml' is invalid because:
'name' does not match any of the regexes: '^x-'
```
See the previous paragraph about installing from the official Docker repository.
See the previous paragraph about installing from the official docker repository.
:::
:::info Health check start interval
@@ -97,17 +92,7 @@ For more information on how to use the application, please refer to the [Post In
Downloading container images might require you to authenticate to the GitHub Container Registry ([steps here][container-auth]).
:::
## Next Steps
### Setting Up Optional Features
You can set up the following now:
- [External Libraries](/docs/features/libraries.md)
- [Hardware Transcoding](/docs/features/hardware-transcoding.md)
- [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md)
### Upgrading
### Step 4 - Upgrading
:::danger Breaking Changes
It is important to follow breaking updates to avoid problems. You can see versions that had breaking changes [here][breaking].
@@ -115,18 +100,12 @@ It is important to follow breaking updates to avoid problems. You can see versio
If `IMMICH_VERSION` is set, it will need to be updated to the latest or desired version.
When a new version of Immich is [released][releases], the application can be upgraded and restarted with the following commands, run in the directory with the `docker-compose.yml` file:
When a new version of Immich is [released][releases], the application can be upgraded with the following commands, run in the directory with the `docker-compose.yml` file:
```bash title="Upgrade and restart Immich"
```bash title="Upgrade Immich"
docker compose pull && docker compose up -d
```
To clean up disk space, the old version's obsolete container images can be deleted with the following command:
```bash title="Delete all obsolete container images"
docker image prune
```
:::caution Automatic Updates
Immich is currently under heavy development, which means you can expect [breaking changes][breaking] and bugs. Therefore, we recommend reading the release notes prior to updating and to take special care when using automated tools like [Watchtower][watchtower].
:::
@@ -137,3 +116,4 @@ Immich is currently under heavy development, which means you can expect [breakin
[breaking]: https://github.com/immich-app/immich/discussions?discussions_q=label%3Achangelog%3Abreaking-change+sort%3Adate_created
[container-auth]: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry
[releases]: https://github.com/immich-app/immich/releases
[docker-repo]: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository

View File

@@ -27,14 +27,23 @@ If this should not work, try running `docker compose up -d --force-recreate`.
These environment variables are used by the `docker-compose.yml` file and do **NOT** affect the containers directly.
:::
### Supported filesystems
The Immich Postgres database (`DB_DATA_LOCATION`) must be located on a filesystem that supports user/group
ownership and permissions (EXT2/3/4, ZFS, APFS, BTRFS, XFS, etc.). It will not work on any filesystem formatted as NTFS, exFAT, or FAT32.
It will not work in WSL (Windows Subsystem for Linux) when using a mounted host directory (commonly under `/mnt`).
If this is an issue, you can change the bind mount to a Docker volume instead.
Regardless of filesystem, it is not recommended to use a network share for your database location due to performance and possible data loss issues.
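A sketch of that workaround, assuming the `database` service name from the official compose file:

```yaml
services:
  database:
    volumes:
      - immich-db:/var/lib/postgresql/data # named volume instead of a bind mount
volumes:
  immich-db:
```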
## General
| Variable | Description | Default | Containers | Workers |
| :---------------------------------- | :---------------------------------------------------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | <sup>\*1</sup> | server | microservices |
| `TZ` | Timezone | | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*1</sup>⚠️ | `./upload`<sup>\*2</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
@@ -42,16 +51,18 @@ These environment variables are used by the `docker-compose.yml` file and do **N
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
| `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
| `IMMICH_IGNORE_MOUNT_CHECK_ERRORS` | See [System Integrity](/docs/administration/system-integrity) | | server | api, microservices |
\*1: `TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
`TZ` is used by `exiftool` as a fallback in case the timezone cannot be determined from the image metadata. It is also used for logfile timestamps and cron job execution.
\*1: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things, you should use the `UPLOAD_LOCATION` variable instead.
\*2: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things, you should use the `UPLOAD_LOCATION` variable instead.
\*3: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
\*2: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method changes.
:::tip
`TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
`TZ` is used by `exiftool` as a fallback in case the timezone cannot be determined from the image metadata. It is also used for logfile timestamps and cron job execution.
:::
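If you would rather not use the `.env` file, the same setting can be applied per container (a sketch):

```yaml
services:
  immich-server:
    environment:
      TZ: Etc/UTC
```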
## Workers
| Variable | Description | Default | Containers |
@@ -68,7 +79,7 @@ Information on the current workers can be found [here](/docs/administration/jobs
| Variable | Description | Default |
| :------------ | :------------- | :----------------------------------------: |
| `IMMICH_HOST` | Listening host | `0.0.0.0` |
| `IMMICH_PORT` | Listening port | `2283` (server), `3003` (machine learning) |
| `IMMICH_PORT` | Listening port | `3001` (server), `3003` (machine learning) |
## Database
@@ -148,24 +159,22 @@ Redis (Sentinel) URL example JSON before encoding:
## Machine Learning
| Variable | Description | Default | Containers |
| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
\*1: When changing the concurrency levels of the machine learning service, it is recommended to begin with this parameter and tune the others afterwards.
@@ -173,8 +182,6 @@ Redis (Sentinel) URL example JSON before encoding:
\*3: Useful for scenarios like horizontal pod autoscaling (HPA) in Kubernetes. See https://github.com/immich-app/immich/discussions/12064
\*4: Using multiple GPUs requires `MACHINE_LEARNING_WORKERS` to be set to a value greater than 1. A single device is assigned to each worker in round-robin order.
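For example, a minimal sketch of these settings on the machine-learning container in `docker-compose.yml` might look like this (all values are illustrative starting points, not recommendations):

```yaml
services:
  immich-machine-learning:
    environment:
      # Tune the request thread pool first (*1), then the other knobs.
      MACHINE_LEARNING_REQUEST_THREADS: 4
      # Two workers, each assigned one GPU in round-robin order (*4).
      MACHINE_LEARNING_WORKERS: 2
      MACHINE_LEARNING_DEVICE_IDS: "0,1"
      # Unload idle models after 10 minutes instead of the default 5.
      MACHINE_LEARNING_MODEL_TTL: 600
```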
:::info
Other machine learning parameters can be tuned from the admin UI.
@@ -183,10 +190,15 @@ Other machine learning parameters can be tuned from the admin UI.
## Prometheus
| Variable | Description | Default | Containers | Workers |
| :------------------------- | :-------------------------------------------------------------------------------------------------------------------- | :-----: | :--------- | :----------------- |
| `IMMICH_TELEMETRY_INCLUDE` | Collect these telemetry groups. List of `host`, `api`, `io`, `repo`, `job`; specify `all` to enable everything | | server | api, microservices |
| `IMMICH_TELEMETRY_EXCLUDE` | Do not collect these telemetries. List of `host`, `api`, `io`, `repo`, `job` | | server | api, microservices |
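For example, a minimal sketch in `docker-compose.yml` for the server container (the group selection is an illustrative choice):

```yaml
services:
  immich-server:
    environment:
      # Collect every telemetry group...
      IMMICH_TELEMETRY_INCLUDE: all
      # ...or list specific groups instead, e.g.:
      # IMMICH_TELEMETRY_INCLUDE: host,api,job
```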
## Docker Secrets
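Immich's documentation describes a `_FILE` suffix convention for a subset of variables (for example, `DB_PASSWORD` becomes `DB_PASSWORD_FILE`, pointing at a file that contains the secret). Assuming that convention, a minimal compose sketch might look like this; the secret name and file path are assumed examples:

```yaml
services:
  immich-server:
    environment:
      # Read the database password from a mounted secret file
      # instead of a plain environment variable.
      DB_PASSWORD_FILE: /run/secrets/db_password
    secrets:
      - db_password

secrets:
  db_password:
    file: ./db_password.txt # assumed example path
```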

(12 binary image files changed in this diff: 9 modified with reduced sizes, 3 removed; contents not shown.)
View File

@@ -8,61 +8,22 @@ Hardware and software requirements for Immich:
## Software
Immich requires [**Docker**](https://docs.docker.com/get-started/get-docker/) with the **Docker Compose plugin**:
- **Docker Engine**: This CLI variant is suitable for Linux servers (or Windows via WSL2).
- **Docker Desktop**: This GUI variant is suitable for Linux desktops, Windows, or macOS.
Following the linked installation guides installs the Compose plugin with both Docker Engine and Docker Desktop; it can also be [installed separately](https://docs.docker.com/compose/install/).
:::note
Immich requires the command `docker compose`; the similarly named `docker-compose` is [deprecated](https://docs.docker.com/compose/migrate/) and is no longer supported by Immich.
:::
## Hardware
- **OS**: Linux is the recommended operating system (Ubuntu, Debian, etc.).
- Non-Linux OSes tend to provide a poor Docker experience and are strongly discouraged.
Our ability to assist with setup or troubleshooting on non-Linux OSes is severely reduced.
If you still want to use a non-Linux OS, you can set it up as follows:
- Windows: [Docker Desktop on Windows](https://docs.docker.com/desktop/install/windows-install/) or [WSL 2](https://docs.docker.com/desktop/wsl/).
- macOS: [Docker Desktop on Mac](https://docs.docker.com/desktop/install/mac-install/).
- **RAM**: Minimum 4GB, recommended 6GB.
- **CPU**: Minimum 2 cores, recommended 4 cores.
- **Storage**: A Unix-compatible filesystem (EXT4, ZFS, APFS, etc.) with support for user/group ownership and permissions is recommended.
- This can present an issue for Windows users. See [here](/docs/install/environment-variables#supported-filesystems)
for more details and alternatives.
- The generation of thumbnails and transcoded video can increase the size of the photo library by 10-20% on average.
:::tip
Good performance and a stable connection to the Postgres database is critical to a smooth Immich experience.
The Postgres database files are typically 1-3 GB in size.
For this reason, the Postgres database (`DB_DATA_LOCATION`) should ideally use local SSD storage, and never a network share of any kind.
Additionally, if Docker resource limits are used, the Postgres database requires at least 2GB of RAM.
:::
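Concretely, this comes down to a single `.env` setting; the SSD mount point below is an assumed example:

```
DB_DATA_LOCATION=/mnt/ssd/immich/postgres
```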
### Special requirements for Windows users
<details>
<summary>Database storage on Windows systems</summary>
The Immich Postgres database (`DB_DATA_LOCATION`) must be located on a filesystem that supports user/group
ownership and permissions (EXT2/3/4, ZFS, APFS, BTRFS, XFS, etc.). It will not work on any filesystem formatted as NTFS, exFAT, or FAT32.
It will not work in WSL (Windows Subsystem for Linux) when using a mounted host directory (commonly under `/mnt`).
If this is an issue, you can change the bind mount to a Docker volume instead as follows:
Make the following change to `.env`:
```diff
- DB_DATA_LOCATION=./postgres
+ DB_DATA_LOCATION=pgdata
```
Add the following line to the bottom of `docker-compose.yml`:
```diff
 volumes:
   model-cache:
+  pgdata:
```
</details>
- Network shares are supported for the storage of image and video assets only.

Some files were not shown because too many files have changed in this diff.