Compare commits


3 Commits

Author    SHA1        Message                       Date
martabal  59300d2097  feat: preload textual model  2024-09-25 18:22:54 +02:00
martabal  d34d631dd4  feat: preload textual model  2024-09-25 17:39:55 +02:00
martabal  708a53a1eb  feat: preload textual model  2024-09-16 17:53:43 +02:00
1587 changed files with 69809 additions and 93413 deletions


@@ -1,2 +0,0 @@
.env
library


@@ -1,16 +0,0 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.24.5"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
&& curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
&& tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
&& rm flutter.tar.xz \
&& chown -R 1000:1000 ${FLUTTER_HOME}


@@ -1,26 +0,0 @@
{
"name": "Immich",
"service": "immich-devcontainer",
"dockerComposeFile": [
"docker-compose.yml",
"../docker/docker-compose.dev.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dbaeumer.vscode-eslint",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
"onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
"overrideCommand": true,
"workspaceFolder": "/immich",
"remoteUser": "node"
}


@@ -1,8 +0,0 @@
services:
immich-devcontainer:
build:
dockerfile: Dockerfile
extra_hosts:
- 'host.docker.internal:host-gateway'
volumes:
- ..:/immich:cached


@@ -1,6 +0,0 @@
#!/bin/bash
# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
cp docker/example.env .devcontainer/.env
fi


@@ -1,25 +0,0 @@
#!/bin/bash
# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
sudo dpkg --add-architecture amd64 && \
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
qemu-user-static \
libc6:amd64 \
libstdc++6:amd64 \
libgcc1:amd64
fi
# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list
sudo apt-get update
sudo apt-get install dcm
dart --disable-analytics
# Install immich
cd /immich || exit
make install-all

.github/dependabot.yml (vendored, new file, 7 lines changed)

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/release.yml (vendored, 4 lines changed)

@@ -4,10 +4,6 @@ changelog:
labels:
- changelog:breaking-change
- title: 🫥 Deprecated Changes
labels:
- changelog:deprecated
- title: 🔒 Security
labels:
- changelog:security


@@ -56,10 +56,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.12.0
uses: docker/build-push-action@v6.7.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64


@@ -22,7 +22,7 @@ concurrency:
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@@ -48,7 +48,7 @@ jobs:
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- cleanup-images
strategy:
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
uses: stumpylog/image-cleaner-action/untagged@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"


@@ -33,7 +33,6 @@ jobs:
- 'server/**'
- 'openapi/**'
- 'web/**'
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
@@ -122,10 +121,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -174,7 +173,7 @@ jobs:
fi
- name: Build and push image
uses: docker/build-push-action@v6.12.0
uses: docker/build-push-action@v6.7.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
@@ -213,10 +212,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.6.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -265,7 +264,7 @@ jobs:
fi
- name: Build and push image
uses: docker/build-push-action@v6.12.0
uses: docker/build-push-action@v6.7.0
with:
context: ${{ env.context }}
file: ${{ env.file }}


@@ -27,7 +27,7 @@ jobs:
- 'docs/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build


@@ -23,7 +23,7 @@ jobs:
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy -refresh=false"
tg_command: "destroy"
- name: Comment
uses: actions-cool/maintain-one-comment@v3


@@ -1,52 +0,0 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})


@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
pull-requests: read
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
@@ -19,4 +19,3 @@ jobs:
use_regex: true
labels: "changelog:.*"
add_comment: true
message: "Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label."


@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@1.3.0
uses: ytanikin/PRConventionalCommits@1.2.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'


@@ -68,17 +68,10 @@ jobs:
needs: build_mobile
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
token: ${{ secrets.ORG_RELEASE_TOKEN }}
- name: Download APK
uses: actions/download-artifact@v4


@@ -30,7 +30,6 @@ jobs:
filters: |
web:
- 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**'
server:
- 'server/**'
@@ -81,7 +80,7 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
- name: Run small tests & coverage
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
@@ -244,26 +243,6 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
needs: pre-job

.gitignore (vendored, 2 lines changed)

@@ -21,5 +21,3 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*


@@ -41,4 +41,4 @@
"explorer.fileNesting.patterns": {
"*.ts": "${capture}.spec.ts,${capture}.mock.ts"
}
}
}


@@ -39,7 +39,7 @@ attach-server:
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk docs
MODULES = e2e server web cli sdk
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
@@ -48,9 +48,11 @@ install-%:
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
format-%:
npm --prefix $* run format:fix
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
lint-%:
npm --prefix $* run lint:fix
check-%:
@@ -64,27 +66,15 @@ test-e2e:
docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(filter-out e2e,$(MODULES)),build-$M) ;
build-all: $(foreach M,$(MODULES),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(MODULES),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),test-$M) ;
test-all: $(foreach M,$(MODULES),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
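The guarded recipes in the hunk above check whether a package actually defines an npm script before invoking it. A minimal standalone sketch of that check, not taken from the repository (the `web` module directory is a placeholder):

```bash
# `npm run` with no script name lists the package's available scripts,
# so grep can test whether a script exists before invoking it.
if npm --prefix web run | grep -q 'format:fix'; then
  npm --prefix web run format:fix
fi
```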


@@ -17,25 +17,23 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_uk_UA.md">Українська</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
</p>
## Disclaimer
@@ -103,8 +101,6 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations


@@ -1 +1 @@
22.13.1
20.17.0


@@ -1,4 +1,4 @@
FROM node:22.13.1-alpine3.20@sha256:c52e20859a92b3eccbd3a36c5e1a90adc20617d8d421d65e8a622e87b5dac963 AS core
FROM node:20.17.0-alpine3.20@sha256:2d07db07a2df6830718ae2a47db6fedce6745f5bcd174c398f2acdda90a11c03 AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

cli/package-lock.json (generated, 1535 lines changed); diff suppressed because it is too large


@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.43",
"version": "2.2.19",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -20,26 +20,26 @@
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.10.9",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"@types/node": "^20.16.5",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint": "^9.0.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1",
"eslint-plugin-unicorn": "^55.0.0",
"globals": "^15.9.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"vite": "^6.0.0",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0",
"vitest-fetch-mock": "^0.4.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.3.0",
"yaml": "^2.3.1"
},
"scripts": {
@@ -67,6 +67,6 @@
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.13.1"
"node": "20.17.0"
}
}


@@ -1,6 +1,5 @@
import {
Action,
AssetBulkUploadCheckItem,
AssetBulkUploadCheckResult,
AssetMediaResponseDto,
AssetMediaStatus,
@@ -12,7 +11,7 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
@@ -91,23 +90,23 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles: files, duplicates: [] };
}
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
const progressBar = new SingleBar(
{ format: 'Checking files | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
progressBar.start(files.length, 0);
const newFiles: string[] = [];
const duplicates: Asset[] = [];
const checkBulkUploadQueue = new Queue<AssetBulkUploadCheckItem[], void>(
async (assets: AssetBulkUploadCheckItem[]) => {
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets } });
const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (filepaths: string[]) => {
const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
if (action === Action.Accept) {
newFiles.push(filepath);
@@ -116,46 +115,19 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
duplicates.push({ id: assetId as string, filepath });
}
}
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
const results: { id: string; checksum: string }[] = [];
let checkBulkUploadRequests: AssetBulkUploadCheckItem[] = [];
const queue = new Queue<string, AssetBulkUploadCheckItem[]>(
async (filepath: string): Promise<AssetBulkUploadCheckItem[]> => {
const dto = { id: filepath, checksum: await sha1(filepath) };
results.push(dto);
checkBulkUploadRequests.push(dto);
if (checkBulkUploadRequests.length === 5000) {
const batch = checkBulkUploadRequests;
checkBulkUploadRequests = [];
void checkBulkUploadQueue.push(batch);
}
hashProgressBar.increment();
progressBar.increment(filepaths.length);
return results;
},
{ concurrency, retry: 3 },
);
for (const item of files) {
void queue.push(item);
for (const items of chunk(files, concurrency)) {
await queue.push(items);
}
await queue.drained();
if (checkBulkUploadRequests.length > 0) {
void checkBulkUploadQueue.push(checkBulkUploadRequests);
}
await checkBulkUploadQueue.drained();
multiBar.stop();
progressBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
@@ -229,8 +201,8 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
{ concurrency, retry: 3 },
);
for (const item of files) {
void queue.push(item);
for (const filepath of files) {
await queue.push(filepath);
}
await queue.drained();


@@ -72,8 +72,8 @@ export class Queue<T, R> {
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to a `unhandledRejection`.
*/
drained(): Promise<void> {
return this.queue.drained();
async drained(): Promise<void> {
await this.queue.drain();
}
/**


@@ -6,7 +6,6 @@ interface Test {
test: string;
options: Omit<CrawlOptions, 'extensions'>;
files: Record<string, boolean>;
skipOnWin32?: boolean;
}
const cwd = process.cwd();
@@ -49,18 +48,6 @@ const tests: Test[] = [
'/photos/image.jpg': true,
},
},
{
test: 'should crawl folders with quotes',
options: {
pathsToCrawl: ["/photo's/", '/photo"s/', '/photo`s/'],
},
files: {
"/photo's/image1.jpg": true,
'/photo"s/image2.jpg': true,
'/photo`s/image3.jpg': true,
},
skipOnWin32: true, // single quote interferes with mockfs root on Windows
},
{
test: 'should crawl a single file',
options: {
@@ -128,7 +115,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {
@@ -283,12 +280,8 @@ describe('crawl', () => {
});
describe('crawl', () => {
for (const { test: name, options, files, skipOnWin32 } of tests) {
if (process.platform === 'win32' && skipOnWin32) {
test.skip(name);
continue;
}
it(name, async () => {
for (const { test, options, files } of tests) {
it(test, async () => {
// The file contents is the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));


@@ -141,21 +141,25 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
}
}
if (patterns.length === 0) {
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
if (recursive) {
searchPattern = searchPattern + '/**/';
}
const globbedFiles = await glob(searchPatterns, {
searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
absolute: true,
caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.50.0"
constraints = "4.50.0"
version = "4.41.0"
constraints = "4.41.0"
hashes = [
"h1:0qvD5ZKn2tMZ8cOjQrUSITIC9tKCZbrSaSswV9lOyiU=",
"h1:4N0gplrZ0zOsJv3Kx1VfIx2FwrZHbYU0Un2yfiLZIGQ=",
"h1:81AMQq4kNKU/35U8ElQegUxG4E6xB0erIjG5xVmjIyo=",
"h1:EEQNADUmV3IL6x00yzy04i7OCSLeOMgM9XQkV3w71gA=",
"h1:HD0KI7td6oiSSAnJNn8UPSGf+hKiTo4JVQYfAiU1SqM=",
"h1:Hl+o5LtcvZg2f3l1hh9vaG/DFK6k+dTIZSeM0lXyfpo=",
"h1:ZUO2oIJ6jtZdvl816h0cEIiIeZ/fFCF64+abGEVxZZM=",
"h1:Zio80fnEeUKdlSOhTVskMEFSLUQ6TMsMKnXc+Dy2P2A=",
"h1:aLLvg36evTyqjtXGV2MjAV8imktXFmry7p/xCu9GQC4=",
"h1:azL05eWyy2V8SWkbZZImPWvv8ynG4eqmrbZhjXBDFug=",
"h1:ckMysHY4fJmr7o58XMi+DdgOTB/U/Mf1u1JA9ly3g/I=",
"h1:jxOwjDNjt5WCb4YjjiMsman91O8Y+MAPz6UwJ4a6F+0=",
"h1:u4OfnjSLa4Wk1IUFAzrvMnGgr8MvRHEWVDHEScPK2E8=",
"h1:wQkR1oeSkzlHn3rnVuLJRJLBHlg4EHt7Y64DeTjfkjQ=",
"zh:0ef99ed39472a94e6a0d6fa733cf0a46bce3bf66eba2873efae8846efdddc237",
"zh:2929cbbffcead171d45c88e4a7a59e9c013ea775dafa68b10da8db7cd04b6140",
"zh:462601c87118088e1a718842e367af7d8e7620598d426980a6d6b33de759865e",
"zh:56766eb62a74a9d88d9efb8486dd3a0c5c9db873d0a980ae9ef1e8af27d74231",
"zh:6b4e8810d99498a5a20a5872982a0f1354e79cfc4a7dfe7cc656f1c7eaae47d8",
"zh:6d65bdb4ec94b6eecc8abe26d94e2ca09262dc1e7a9934db829f418be0119920",
"zh:71adeaf31e41a358ec6095004062e43f56ee7d4b2504e5613ab351d511695641",
"h1:0mc+YrjQrcctGrGYDmzlcqcgSv9MYB74rvMaZylIKC8=",
"h1:0zUx4vk4jOORQqn6xHBF7dO6N6bielFHdJ0mgF4Obn8=",
"h1:AsIZW3uLFNOZO7kL/K7/Y/S0IYxUV9Hz85NNk/3TTsA=",
"h1:FSgYM4+LHMbX/a4Y1kx7FPPWmXqS3/MQYzvjMJHHHWM=",
"h1:Tx6Nh3BWP1x9L3KK/Eyi+ET0T26g3+jf1jyiuqpNIis=",
"h1:VRI9wu8P43xxfpeTndRwsisLnqncfnmEYMOEH5zH4pQ=",
"h1:YxQqmiES/Yanq/VfGqBEqg+VIO7FGhO88aKoWFHyGIg=",
"h1:ZWHiaesjgDLKWlfdNj0oKyj/DWdxcfsO6NINu39zfpY=",
"h1:a2aCgDDBz3ccrr8YstIMl7VFnKo1xZAp+rOv59PPJ7U=",
"h1:aRyv8tB6wBAF9lKsLEdiHyCqnK5LfZq0FqMXCcUB4UU=",
"h1:lXpuO7zv2uD2GzPE1ARxznreRAh+QHTc2lAJ7iOoFgY=",
"h1:sA1xq0QNQ4fH8SHXouYNq50xirVD18SamKQwPsBQrrY=",
"h1:v7sHvKq7oqMYPn47ULHFyIQsKD9o+6Xg/uHbxQUixEw=",
"h1:wo/x4atWyXuWGlfR6h5nH0YwBAmBwTRY27HtWP8ycLo=",
"zh:339d26e06dc6fb299ea8aad9476a60fd65bb1d40631ae8eeb81cddf2dd2bebc8",
"zh:3dec2ad96ac2c283fd34ce65781b55c4edbb4d5c5cb53da8e31537176c0ed562",
"zh:5f63a5f8080319a2fff09d4d49944829fa708723436520787cfb60725ced80cf",
"zh:67162c28ccea71cb8141ed15c0637e35621354ebe14878e0b75a8f160fc5505d",
"zh:6ac1e07f5347b6395aca690ed22101bb25e957d25f986f760ff673a7adfd5ef6",
"zh:70282a723c7b52fcabde2baad41c864ed3a8d69f0c4d27a6b6933cac434cffc6",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:89761c15908ccc2cf9c50bb5cb3be45d3ad0c45fc7c608c6b95f48c0288b7160",
"zh:8cc5d7c5939da89cfd01f3e51c84f3576564783acea9db86bd9e32049805ed96",
"zh:987cff8225b1dd436cdcb4fc6228689ae7e4281de6896412a2a9a3325c49f05e",
"zh:991e83ebb89867d71e01a1c215ed159efb425683b0a44707be8579eb0a337f06",
"zh:ab8177ae2d8f5cfa90043a6f867435012cae115f6061b832a7e2462e0ae87a67",
"zh:d1ca34df1398f201274a6a18102975148c10ca15aa43cfc56cc9897620929509",
"zh:d34946f70201baf6dda03e3b294c6bbe40d95d0278e97b9f636ded94822b24ac",
"zh:924cd23abc326c6b3914e2cd9c94c7832c2552e1e9ae258fb9fd9aedaa5f7ce7",
"zh:a4b75e4c239879296259e7d54f1befbc7fdc16da2d62d1294e9f73add4cae61e",
"zh:a6ceb08feb63b00c7141783b31e45a154c76fd8cdebbdf371074805f0053572d",
"zh:afae1843f9ba85f2f6d94108c65cf43a457e83531a632d44d863e935160cb2ba",
"zh:bd6628ce60c778960a5755f7010b7e2cc5c6ff0341a21c175341b28058ec843d",
"zh:cd30866a1ff99d72b5fa1699db582fa4f25562e6ab21dcc6870324f3056108e0",
"zh:df5924cca691a8220aaaebb5cb55c3d6c32ff0a881f198695eff28155eb12b54",
"zh:e78d0696c941aba58df1cb36b8a0d25cd5f3963f01d9338fdbda74db58afdd49",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.50.0"
version = "4.41.0"
}
}
}


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.50.0"
constraints = "4.50.0"
version = "4.41.0"
constraints = "4.41.0"
hashes = [
"h1:0qvD5ZKn2tMZ8cOjQrUSITIC9tKCZbrSaSswV9lOyiU=",
"h1:4N0gplrZ0zOsJv3Kx1VfIx2FwrZHbYU0Un2yfiLZIGQ=",
"h1:81AMQq4kNKU/35U8ElQegUxG4E6xB0erIjG5xVmjIyo=",
"h1:EEQNADUmV3IL6x00yzy04i7OCSLeOMgM9XQkV3w71gA=",
"h1:HD0KI7td6oiSSAnJNn8UPSGf+hKiTo4JVQYfAiU1SqM=",
"h1:Hl+o5LtcvZg2f3l1hh9vaG/DFK6k+dTIZSeM0lXyfpo=",
"h1:ZUO2oIJ6jtZdvl816h0cEIiIeZ/fFCF64+abGEVxZZM=",
"h1:Zio80fnEeUKdlSOhTVskMEFSLUQ6TMsMKnXc+Dy2P2A=",
"h1:aLLvg36evTyqjtXGV2MjAV8imktXFmry7p/xCu9GQC4=",
"h1:azL05eWyy2V8SWkbZZImPWvv8ynG4eqmrbZhjXBDFug=",
"h1:ckMysHY4fJmr7o58XMi+DdgOTB/U/Mf1u1JA9ly3g/I=",
"h1:jxOwjDNjt5WCb4YjjiMsman91O8Y+MAPz6UwJ4a6F+0=",
"h1:u4OfnjSLa4Wk1IUFAzrvMnGgr8MvRHEWVDHEScPK2E8=",
"h1:wQkR1oeSkzlHn3rnVuLJRJLBHlg4EHt7Y64DeTjfkjQ=",
"zh:0ef99ed39472a94e6a0d6fa733cf0a46bce3bf66eba2873efae8846efdddc237",
"zh:2929cbbffcead171d45c88e4a7a59e9c013ea775dafa68b10da8db7cd04b6140",
"zh:462601c87118088e1a718842e367af7d8e7620598d426980a6d6b33de759865e",
"zh:56766eb62a74a9d88d9efb8486dd3a0c5c9db873d0a980ae9ef1e8af27d74231",
"zh:6b4e8810d99498a5a20a5872982a0f1354e79cfc4a7dfe7cc656f1c7eaae47d8",
"zh:6d65bdb4ec94b6eecc8abe26d94e2ca09262dc1e7a9934db829f418be0119920",
"zh:71adeaf31e41a358ec6095004062e43f56ee7d4b2504e5613ab351d511695641",
"h1:0mc+YrjQrcctGrGYDmzlcqcgSv9MYB74rvMaZylIKC8=",
"h1:0zUx4vk4jOORQqn6xHBF7dO6N6bielFHdJ0mgF4Obn8=",
"h1:AsIZW3uLFNOZO7kL/K7/Y/S0IYxUV9Hz85NNk/3TTsA=",
"h1:FSgYM4+LHMbX/a4Y1kx7FPPWmXqS3/MQYzvjMJHHHWM=",
"h1:Tx6Nh3BWP1x9L3KK/Eyi+ET0T26g3+jf1jyiuqpNIis=",
"h1:VRI9wu8P43xxfpeTndRwsisLnqncfnmEYMOEH5zH4pQ=",
"h1:YxQqmiES/Yanq/VfGqBEqg+VIO7FGhO88aKoWFHyGIg=",
"h1:ZWHiaesjgDLKWlfdNj0oKyj/DWdxcfsO6NINu39zfpY=",
"h1:a2aCgDDBz3ccrr8YstIMl7VFnKo1xZAp+rOv59PPJ7U=",
"h1:aRyv8tB6wBAF9lKsLEdiHyCqnK5LfZq0FqMXCcUB4UU=",
"h1:lXpuO7zv2uD2GzPE1ARxznreRAh+QHTc2lAJ7iOoFgY=",
"h1:sA1xq0QNQ4fH8SHXouYNq50xirVD18SamKQwPsBQrrY=",
"h1:v7sHvKq7oqMYPn47ULHFyIQsKD9o+6Xg/uHbxQUixEw=",
"h1:wo/x4atWyXuWGlfR6h5nH0YwBAmBwTRY27HtWP8ycLo=",
"zh:339d26e06dc6fb299ea8aad9476a60fd65bb1d40631ae8eeb81cddf2dd2bebc8",
"zh:3dec2ad96ac2c283fd34ce65781b55c4edbb4d5c5cb53da8e31537176c0ed562",
"zh:5f63a5f8080319a2fff09d4d49944829fa708723436520787cfb60725ced80cf",
"zh:67162c28ccea71cb8141ed15c0637e35621354ebe14878e0b75a8f160fc5505d",
"zh:6ac1e07f5347b6395aca690ed22101bb25e957d25f986f760ff673a7adfd5ef6",
"zh:70282a723c7b52fcabde2baad41c864ed3a8d69f0c4d27a6b6933cac434cffc6",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:89761c15908ccc2cf9c50bb5cb3be45d3ad0c45fc7c608c6b95f48c0288b7160",
"zh:8cc5d7c5939da89cfd01f3e51c84f3576564783acea9db86bd9e32049805ed96",
"zh:987cff8225b1dd436cdcb4fc6228689ae7e4281de6896412a2a9a3325c49f05e",
"zh:991e83ebb89867d71e01a1c215ed159efb425683b0a44707be8579eb0a337f06",
"zh:ab8177ae2d8f5cfa90043a6f867435012cae115f6061b832a7e2462e0ae87a67",
"zh:d1ca34df1398f201274a6a18102975148c10ca15aa43cfc56cc9897620929509",
"zh:d34946f70201baf6dda03e3b294c6bbe40d95d0278e97b9f636ded94822b24ac",
"zh:924cd23abc326c6b3914e2cd9c94c7832c2552e1e9ae258fb9fd9aedaa5f7ce7",
"zh:a4b75e4c239879296259e7d54f1befbc7fdc16da2d62d1294e9f73add4cae61e",
"zh:a6ceb08feb63b00c7141783b31e45a154c76fd8cdebbdf371074805f0053572d",
"zh:afae1843f9ba85f2f6d94108c65cf43a457e83531a632d44d863e935160cb2ba",
"zh:bd6628ce60c778960a5755f7010b7e2cc5c6ff0341a21c175341b28058ec843d",
"zh:cd30866a1ff99d72b5fa1699db582fa4f25562e6ab21dcc6870324f3056108e0",
"zh:df5924cca691a8220aaaebb5cb55c3d6c32ff0a881f198695eff28155eb12b54",
"zh:e78d0696c941aba58df1cb36b8a0d25cd5f3963f01d9338fdbda74db58afdd49",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.50.0"
version = "4.41.0"
}
}
}


@@ -36,18 +36,14 @@ services:
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
IMMICH_BUILD_IMAGE: development
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
hard: 1048576
ports:
- 3001:3001
- 9230:9230
- 9231:9231
- 2283:2283
depends_on:
- redis
- database
@@ -57,21 +53,17 @@ services:
immich-web:
container_name: immich_web
image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build:
context: ../web
command: ['/usr/src/app/bin/immich-web']
env_file:
- .env
ports:
- 3000:3000
- 2283:3000
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
# - ../../ui:/usr/ui
- /usr/src/app/node_modules
ulimits:
nofile:
@@ -107,7 +99,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
@@ -126,25 +118,28 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
# ports:


@@ -16,7 +16,7 @@ services:
env_file:
- .env
ports:
- 2283:2283
- 2283:3001
depends_on:
- redis
- database
@@ -47,7 +47,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -67,31 +67,19 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
immich-prometheus:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:6559acbd5d770b15bb3c954629ce190ac3cbbdb2b7f1c30f0385c4e05104e218
image: prom/prometheus@sha256:f6639335d34a77d9d9db382b92eeb7fc00934be8eae81dbc03b31cfe90411a94
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -103,7 +91,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
image: grafana/grafana:11.2.0-ubuntu@sha256:8e2c13739563c3da9d45de96c6bcb63ba617cac8c571c060112c7fc8ad6914e9
volumes:
- grafana-data:/var/lib/grafana


@@ -22,7 +22,7 @@ services:
env_file:
- .env
ports:
- '2283:2283'
- 2283:3001
depends_on:
- redis
- database
@@ -48,7 +48,7 @@ services:
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: docker.io/redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -65,23 +65,11 @@ services:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
volumes:


@@ -1 +1 @@
22.13.1
20.17.0


@@ -49,22 +49,13 @@ The behaviors differ based on your device manufacturer and operating system, but
On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below:
- Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`.
- Disable **Low Power Mode** when not needed, as this can prevent apps from running in the background.
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often.
### Why are features in the mobile app not working with a self-signed certificate, Basic Auth, custom headers, or mutual TLS?
### Why are features not working with a self-signed cert or mTLS?
These network features are experimental. They often do not work with video playback, asset upload or download, and other features.
Many of these limitations are tracked in [#15230](https://github.com/immich-app/immich/issues/15230).
Instead of these experimental features, we recommend using the URL switching feature, a VPN, or a [free trusted SSL certificate](https://letsencrypt.org/) for your domain.
We are not actively developing these features and will not be able to provide support, but welcome contributions to improve them.
Please discuss any large PRs with our dev team to ensure your time is not wasted.
### Why isn't the mobile app updated yet?
The app stores can take a few days to approve new builds of the app. If you're impatient, android APKs can be downloaded from the GitHub releases.
Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
---
@@ -78,8 +69,7 @@ However, Immich will delete original files that have been trashed when the trash
### Why do my file names appear as a random string in the file manager?
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names.
To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names. To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
### Can I add my existing photo library?
@@ -92,20 +82,11 @@ Template changes will only apply to _new_ assets. To retroactively apply the tem
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy in front of Immich.
Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to allow large requests.
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, please know that they set a maximum file size of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
This often happens when using a reverse proxy (such as Nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also, check the disk space of your reverse proxy. In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
### Why are some photos stored in the file system with the wrong date?
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job.
The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc.,
the job may not have run automatically the first time.
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc., the job may not have run automatically the first time.
### How can I hide photos from the timeline?
@@ -135,8 +116,7 @@ Also, there are additional jobs for person (face) thumbnails.
### Why do files from WhatsApp not appear with the correct date?
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp,
not the order of arrival on the device. [See #9116](https://github.com/immich-app/immich/discussions/9116).
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, not the order of arrival on the device. [See #3527](https://github.com/immich-app/immich/issues/3527).
### What happens if an asset exists in more than one account?
@@ -164,35 +144,6 @@ For example, say you have existing transcodes with the policy "Videos higher tha
No. Our design principle is that the original assets should always be untouched.
### How can I mount a CIFS/Samba volume within Docker?
If you aren't able to or prefer not to mount Samba on the host (such as Windows environment), you can mount the volume within Docker.
Below is an example in the `docker-compose.yml`.
Change your username, password, local IP, and share name, and see below where the line `- originals:/usr/src/app/originals`
correlates to the section where the volume `originals` was created. You can call this whatever you like, and map it to the docker container as you like.
For example, you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `Photos:/usr/src/app/photos`.
```diff
...
services:
immich-server:
...
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
+ - originals:/usr/src/app/originals
...
volumes:
model-cache:
+ originals:
+ driver_opts:
+ type: cifs
+ o: 'iocharset=utf8,username=USERNAMEHERE,password=PASSWORDHERE,rw' # change to `ro` if read only desired
+ device: '//localipaddress/sharename'
```
---
## Albums
@@ -236,11 +187,11 @@ However, when the trash is emptied, the files will re-appear in the main timelin
### How does smart search work?
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
### How does facial recognition work?
See [How Facial Recognition Works](/docs/features/facial-recognition#how-facial-recognition-works) for details.
See [How Facial Recognition Works](/docs/features/facial-recognition#How-Facial-Recognition-Works) for details.
### How can I disable machine learning?
@@ -262,7 +213,7 @@ No, this is not supported. Only models listed in the [Hugging Face][huggingface]
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/searching#clip-model) for instructions.
You can change to a multilingual CLIP model. See [here](/docs/features/smart-search#CLIP-model) for instructions.
### Does Immich support Facial Recognition for videos?
@@ -273,7 +224,7 @@ Scanning the entire video for faces may be implemented in the future.
No.
:::tip
You can use [Smart Search](/docs/features/searching.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
You can use [Smart Search](/docs/features/smart-search.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
:::
### I'm getting a lot of "faces" that aren't faces, what can I do?
@@ -315,7 +266,7 @@ The initial backup is the most intensive due to the number of jobs running. The
- For facial recognition on new images to work properly, You must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
- It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [above](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage?
@@ -357,7 +308,7 @@ Do not exaggerate with the job concurrency because you're probably thoroughly ov
### My server shows Server Status Offline | Version Unknown. What can I do?
You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your reverse proxy.
You need to enable WebSockets on your reverse proxy.
---
@@ -382,13 +333,9 @@ You may need to add mount points or docker volumes for the following internal co
- `immich-machine-learning:/.cache`
- `redis:/data`
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
:::note Docker Compose Volumes
The Docker Compose top level volume element does not support non-root access, all of the above volumes must be local volume mounts.
:::
For a further hardened system, you can add the following block to every container.
For a further hardened system, you can add the following block to every container except for `immich_postgres`.
<details>
<summary>docker-compose.yml</summary>
@@ -437,28 +384,29 @@ If the error says the worker is exiting, then this is normal. This is a feature
There are a few reasons why this can happen.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory.
Consider either increasing the server's RAM or moving the service to a server with more RAM.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory. Consider either increasing the server's RAM or moving the service to a server with more RAM.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible with Immich.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible. This is unlikely to occur on version 1.92.0 or later. Consider upgrading if your version of Immich is below that.
If your version of Immich is below 1.92.0 and the crash occurs after logs about tracing or exporting a model, consider either upgrading or disabling the Tag Objects job.
## Database
### Why am I getting database ownership errors?
If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem.
NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/requirements#special-requirements-for-windows-users) for more details.
NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/environment-variables#supported-filesystems) for more details.
### How can I verify the integrity of my database?
Database checksums are enabled by default for new installations since v1.104.0. You can check if they are enabled by running the following command.
If you installed Immich using v1.104.0 or later, you likely have database checksums enabled by default. You can check this by running the following command.
A result of `on` means that checksums are enabled.
<details>
<summary>Check if checksums are enabled</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="show data_checksums"
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="show data_checksums"
data_checksums
----------------
on
@@ -467,13 +415,13 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details>
If checksums are enabled, you can check the status of the database with the following command. A normal result is all `0`s.
If checksums are enabled, you can check the status of the database with the following command. A normal result is all zeroes.
<details>
<summary>Check for database corruption</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
datname | checksum_failures | checksum_last_failure
-----------+-------------------+-----------------------
postgres | 0 |
@@ -485,24 +433,6 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details>
You can also scan the Postgres database file structure for errors:
<details>
<summary>Scan for file structure errors</summary>
```bash
docker exec -it immich_postgres pg_amcheck --username=postgres --heapallindexed --parent-check --rootdescend --progress --all --install-missing
```
A normal result will end something like this and return with an exit code of `0`:
```bash
7470/8832 relations (84%), 730829/734735 pages (99%)
8425/8832 relations (95%), 734367/734735 pages (99%)
8832/8832 relations (100%), 734735/734735 pages (100%)
```
</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected.
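One way to apply that flag for a single dump session, sketched under the assumption of the stock `immich_postgres` container and `postgres` superuser used in the commands above (the output path is a placeholder):

```bash
# PGOPTIONS passes the setting to this session only; zero_damaged_pages needs
# superuser privileges, so this assumes the dump connects as postgres.
docker exec -t -e PGOPTIONS='-c zero_damaged_pages=on' immich_postgres \
  pg_dumpall --clean --if-exists --username=postgres > /path/to/backup/damaged-dump.sql
```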


@@ -5,10 +5,6 @@ import TabItem from '@theme/TabItem';
A [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) is recommended to protect your data. You should keep copies of your uploaded photos/videos as well as the Immich database for a comprehensive backup solution. This page provides an overview on how to backup the database and the location of user-uploaded pictures and videos. A template bash script that can be run as a cron job is provided [here](/docs/guides/template-backup-script.md)
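For the database part, a minimal cron-friendly dump sketch, assuming the stock `immich_postgres` container and `postgres` user used later on this page (the backup path is a placeholder):

```bash
#!/usr/bin/env bash
# Dump the whole cluster and compress it with a dated filename.
BACKUP_DIR="/path/to/backup"
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres \
  | gzip > "${BACKUP_DIR}/immich-db-$(date +%F).sql.gz"
```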
:::danger
The instructions on this page show you how to prepare your Immich instance to be backed up, and which files to take a backup of. You still need to take care of using an actual backup tool to make a backup yourself.
:::
## Database
:::caution
@@ -19,22 +15,12 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
:::
The recommended way to backup and restore the Immich database is to use the `pg_dumpall` command. When restoring, you need to delete the `DB_DATA_LOCATION` folder (if it exists) to reset the database.
:::caution
It is not recommended to directly backup the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups
For convenience, Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
As mentioned above, you should make your own backup of these together with the asset folders as noted below.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Restoring
We hope to make restoring simpler in future versions, for now you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then please follow the steps in the following section for restoring the database.
### Manual Backup and Restore
<Tabs>
@@ -48,61 +34,94 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
sleep 10 # Wait for Postgres server to start up
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
docker compose up -d # Start remainder of Immich apps
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres))
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | Set-Content -Encoding utf8 "C:\path\to\backup\dump.sql"
```
```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: `- 'C:\path\to\backup\dump.sql:/dump.sql'`) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip`
cat < "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
</Tabs>
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database.
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).
:::tip
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored.
Some deployment methods make it difficult to start the database without also starting the server or microservices. In these cases, you may set the environmental variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Note that both the server and microservices must have this variable set to prevent the migrations from running. Be sure to remove this variable and restart the services after the database is restored.
:::
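For example, a minimal sketch of that flow with the default compose setup, assuming your settings live in the `.env` file next to `docker-compose.yml` (adjust names and paths to your deployment):

```bash title='Restore with DB_SKIP_MIGRATIONS (sketch)'
echo "DB_SKIP_MIGRATIONS=true" >> .env   # prevent the server from running migrations during the restore

docker compose create            # Create Docker containers for Immich apps without running them
docker start immich_postgres     # Start only the Postgres container
sleep 10                         # Wait for Postgres server to start up

# ...restore the dump as shown in the Manual Backup and Restore section...

sed -i '/^DB_SKIP_MIGRATIONS=true$/d' .env   # remove the flag again
docker compose up -d                         # start the full stack; migrations now run normally
```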
### Automatic Database Backups
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
services:
...
backup:
container_name: immich_db_dumper
image: prodrigestivill/postgres-backup-local:14
restart: always
env_file:
- .env
environment:
POSTGRES_HOST: database
POSTGRES_CLUSTER: 'TRUE'
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: ${DB_DATABASE_NAME}
SCHEDULE: "@daily"
POSTGRES_EXTRA_OPTS: '--clean --if-exists'
BACKUP_DIR: /db_dumps
volumes:
- ./db_dumps:/db_dumps
depends_on:
- database
```
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
```
:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.
Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
:::
## Filesystem
Immich stores two types of content in the filesystem: (a) original, unmodified assets (photos and videos), and (b) generated content. We recommend backing up the entire contents of `UPLOAD_LOCATION`, but only the original content is critical, which is stored in the following folders:
Immich stores two types of content in the filesystem: (1) original, unmodified assets (photos and videos), and (2) generated content. Only the original content needs to be backed-up, which is stored in the following folders:
1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile`
If you choose to back up only those folders, you will need to rerun the transcoding and thumbnail generation jobs for all assets after you restore from a backup.
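For example, a minimal `rsync` sketch that copies only those three folders; the upload location and backup target paths below are placeholders, so adjust them to your setup:

```bash title='Back up original content only (sketch)'
#!/usr/bin/env bash
UPLOAD_LOCATION="/path/to/immich/upload"       # hypothetical path to your upload location
BACKUP_TARGET="/path/to/backup/immich-files"   # hypothetical backup destination

# Copy only the original content; generated content can be recreated by re-running jobs
rsync -a --delete \
  "${UPLOAD_LOCATION}/library" \
  "${UPLOAD_LOCATION}/upload" \
  "${UPLOAD_LOCATION}/profile" \
  "${BACKUP_TARGET}/"
```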
:::caution
If you moved any of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup paths to match your setup.
:::
@@ -178,7 +197,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:**
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- Stored in `UPLOCAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.

@@ -18,10 +18,4 @@ You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.webp').default} width="80%" title="User notification settings" />
## Notification templates
You can override the default notification text with custom templates in HTML format. You can use tags to insert dynamic values into your templates.
<img src={require('./img/user-notifications-templates.webp').default} width="80%" title="User notification templates" />
<img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />

@@ -22,7 +22,7 @@ Copy the entire `immich-server` block as a new service and make the following ch
- container_name: immich_server
...
- ports:
- - 2283:2283
- - 2283:3001
+ immich-microservices:
+ container_name: immich_microservices
```
@@ -48,4 +48,8 @@ When a new asset is uploaded it kicks off a series of jobs, which include metada
Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed.
:::info
Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::
<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />

@@ -11,7 +11,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Authelia](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)

@@ -13,9 +13,9 @@ Running with a pre-existing Postgres server can unlock powerful administrative f
You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
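After editing `postgresql.conf` and restarting Postgres, a quick sanity check is to confirm the library is actually preloaded; this sketch assumes you can reach the database with `psql` as the `postgres` user:

```bash
# Should list vectors.so among the preloaded libraries
psql --username=postgres --command "SHOW shared_preload_libraries;"
```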
:::note
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. For example, if your Immich version uses the dedicated database image `tensorchord/pgvecto-rs:pg14-v0.2.1`, you must install pgvecto.rs `>= 0.2.1, < 0.3.0`.
:::
## Specifying the connection URL
@@ -42,10 +42,6 @@ Typically Immich expects superuser permission in the database, which you can gra
This method is recommended for **advanced users only** and often requires manual intervention when updating Immich.
:::
:::danger
Currently, automated backups require superuser permission due to the usage of `pg_dumpall`.
:::
Immich can run without superuser permissions by following the below instructions at the `psql` prompt to prepare the database.
```sql title="Set up Postgres for Immich"

@@ -0,0 +1,27 @@
# Repair Page
The repair page gives the system administrator information about untracked files and offline paths.
## Natural State
In this situation, everything is in its place and there is no problem that the system administrator should be aware of.
<img src={require('./img/repair-page.png').default} title="server statistic" />
## Any Other Situation
:::note RAM Usage
Several users have reported that this page fails to load. To solve this problem, try allocating more RAM to Immich; if the problem persists, bypass the reverse proxy while loading the page.
:::
In any other situation, there are 3 different options that can appear:
- MATCHES - These files are matched by their checksums.
- OFFLINE PATHS - These files are the result of manually deleting files from Immich or of a failed file move in the past (losing track of a file).
- UNTRACKED FILES - These files are not tracked by the application. They can be the result of failed moves, interrupted uploads, or left behind due to a bug.
In addition, you can download the information from the page, select everything (to re-check the hashes), and correct the problem when a matching checksum is found.
<img src={require('./img/repair-page-1.png').default} title="server statistic" />

@@ -6,10 +6,6 @@ Users can deploy a custom reverse proxy that forwards requests to Immich. This w
The Repair page can take a long time to load. To avoid server timeouts or errors, we recommend specifying a timeout of at least 10 minutes on your proxy server.
:::
:::caution
Immich does not support being served on a sub-path such as `location /immich {`. It has to be served on the root path of a (sub)domain.
:::
### Nginx example config
Below is an example config for nginx. Make sure to set `public_url` to the front-facing URL of your instance, and `backend_url` to the path of the Immich server.
@@ -44,26 +40,6 @@ server {
}
```
#### Compatibility with Let's Encrypt
In the event that your nginx configuration includes a section for Let's Encrypt, it's likely that you have a segment similar to the following:
```nginx
location ~ /.well-known {
...
}
```
This particular `location` directive can inadvertently prevent mobile clients from reaching the `/.well-known/immich` path, which is crucial for discovery. The usual error message in this case is: "Your app major version is not compatible with the server". To remedy this, you should introduce an additional location block specifically for this path, ensuring that requests are correctly proxied to the Immich server:
```nginx
location = /.well-known/immich {
proxy_pass http://<backend_url>:2283;
}
```
By doing so, you'll maintain the functionality of Let's Encrypt while allowing mobile clients to access the necessary Immich path without obstruction.
### Caddy example config
As an alternative to nginx, you can also use [Caddy](https://caddyserver.com/) as a reverse proxy (with automatic HTTPS configuration). Below is an example config.
@@ -123,7 +99,7 @@ services:
# increase readingTimeouts for the entrypoint used here
traefik.http.routers.immich.entrypoints: websecure
traefik.http.routers.immich.rule: Host(`immich.your-domain.com`)
traefik.http.services.immich.loadbalancer.server.port: 2283
traefik.http.services.immich.loadbalancer.server.port: 3001
```
Keep in mind that Traefik needs to communicate with the network that Immich is in, usually done

@@ -1,9 +1,13 @@
# Server Stats
Server statistics to show the total number of videos, photos, usage and quota per user.
Server statistics to show the total number of videos, photos, and usage per user.
:::info External library
External libraries are not included in the storage quota due to custom mount points.
:::info
If a storage quota has been defined for the user, the usage number will be displayed as a percentage of the total storage quota allocated to them.
:::
<img src={require('./img/server-stats.webp').default} title="server statistic" />
:::info External library
External library is not included in the storage quota.
:::
<img src={require('./img/server-stats.png').default} title="server statistic" />

@@ -1,49 +0,0 @@
# System Integrity
## Folder checks
:::info
The folders considered for these checks include: `upload/`, `library/`, `thumbs/`, `encoded-video/`, `profile/`, `backups/`
:::
When Immich starts, it performs a series of checks in order to validate that it can read and write files to the volume mounts used by the storage system. If it cannot perform all the required operations, it will fail to start. The checks include:
- Creating an initial hidden file (`.immich`) in each folder
- Reading a hidden file (`.immich`) in each folder
- Overwriting a hidden file (`.immich`) in each folder
The checks are designed to catch the following situations:
- Incorrect permissions (cannot read/write files)
- Missing volume mount (`.immich` files should exist, but are missing)
### Common issues
:::note
`.immich` files serve as markers and help keep track of volume mounts being used by Immich. Except for the situations listed below, they should never be manually created or deleted.
:::
#### Missing `.immich` files
```
Verifying system mount folder checks (enabled=true)
...
ENOENT: no such file or directory, open 'upload/encoded-video/.immich'
```
The above error messages show that the server has previously (successfully) written `.immich` files to each folder, but now does not detect them. This could be due to any of the following:
- Permission error - unable to read the file, but it exists
- File does not exist - volume mount has changed and should be corrected
- File does not exist - user manually deleted it and should be manually re-created (`touch .immich`)
- File does not exist - user restored from a backup, but did not restore each folder (the user should restore all folders or manually create `.immich` in any missing folders; see the sketch after this list)
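For the last two situations, a sketch of re-creating the missing marker files; the upload location path below is a placeholder:

```bash
cd /path/to/UPLOAD_LOCATION   # hypothetical path to your upload location
# Re-create the marker file in any folder where it is missing
for dir in upload library thumbs encoded-video profile backups; do
  [ -f "$dir/.immich" ] || touch "$dir/.immich"
done
```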
### Ignoring the checks
:::warning
The checks are designed to catch common problems that we have seen users have in the past, and often indicate there's something wrong that you should solve. If you know what you're doing and you want to disable them you can set the following environment variable:
:::
```
IMMICH_IGNORE_MOUNT_CHECK_ERRORS=true
```

@@ -1,6 +1,10 @@
# System Settings
The admin user can manage settings for the Immich instance here.
On the system settings page, the administrator can manage global settings for the Immich instance.
:::note
Viewing and modifying the system settings is restricted to the Administrator.
:::
:::tip
You can always return to the default settings by clicking the `Reset to default` button.
@@ -100,7 +104,8 @@ You can choose to disable a certain type of machine learning, for example smart
### Smart Search
The [smart search](/docs/features/searching) settings allow you to change the [CLIP model](https://openai.com/research/clip). Larger models will typically provide [more accurate search results](https://github.com/immich-app/immich/discussions/11862) but consume more processing power and RAM. When [changing the CLIP model](/docs/FAQ#can-i-use-a-custom-clip-model) it is mandatory to re-run the Smart Search job on all images to fully apply the change.
The [smart search](/docs/features/smart-search) settings are designed to allow the search tool to be used using [CLIP](https://openai.com/research/clip) models that [can be changed](/docs/FAQ#can-i-use-a-custom-clip-model), different models will necessarily give better results but may consume more processing power, when changing a model it is mandatory to re-run the
Smart Search job on all images to fully apply the change.
:::info Internet connection
Changing models requires a connection to the Internet to download the model.
@@ -152,10 +157,6 @@ Immich supports [Reverse Geocoding](/docs/features/reverse-geocoding) using data
SMTP server setup, for user creation notifications, new albums, etc. More information can be found [here](/docs/administration/email-notification)
## Notification Templates
Override the default notifications text with notification templates. More information can be found [here](/docs/administration/email-notification)
## Server Settings
### External Domain
@@ -204,68 +205,4 @@ When this option is enabled the `immich-server` will periodically make requests
## Video Transcoding Settings
The system administrator can configure which video files will be converted to different formats. The settings can be changed in depth, to learn more about the terminology used here, refer to FFmpeg documentation for [H.264](https://trac.ffmpeg.org/wiki/Encode/H.264) codec, [HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) codec and [VP9](https://trac.ffmpeg.org/wiki/Encode/VP9) codec.
Which streams of a video file will be transcoded is determined by the [Transcode Policy](#ffmpeg.transcode). Streams that are transcoded use the following settings (config file name in brackets). Streams that are not transcoded are untouched and preserve their original settings.
### Accepted containers (`ffmpeg.acceptedContainers`) {#ffmpeg.acceptedContainers}
If the video asset's container format is not in this list, it will be remuxed to MP4 even if no streams need to be transcoded.
The default set of accepted container formats is `mov`, `ogg` and `webm`.
### Preset (`ffmpeg.preset`) {#ffmpeg.preset}
The amount of "compute effort" to put into transcoding. These use [the preset names from h264](https://trac.ffmpeg.org/wiki/Encode/H.264#Preset) and will be converted to appropriate values for encoders that configure effort in different ways.
The default value is `ultrafast`.
### Audio codec (`ffmpeg.targetAudioCodec`) {#ffmpeg.targetAudioCodec}
Which audio codec to use when the audio stream is being transcoded. Can be one of `mp3`, `aac`, `libopus`.
The default value is `aac`.
### Video Codec (`ffmpeg.targetVideoCodec`) {#ffmpeg.targetVideoCodec}
Which video codec to use when the video stream is being transcoded. Can be one of `h264`, `hevc`, `vp9` or `av1`.
The default value is `h264`.
### Target resolution (`ffmpeg.targetResolution`) {#ffmpeg.targetResolution}
When transcoding a video stream, downscale the largest dimension to this value while preserving aspect ratio. Videos are never upscaled.
The default value is `720`.
### Transcode policy (`ffmpeg.transcode`) {#ffmpeg.transcode}
The transcoding policy configures which streams of a video asset will be transcoded. The transcoding decision is made independently for video streams and audio streams. This means that if a video stream needs to be transcoded, but an audio stream does not, then the video stream will be transcoded while the audio stream will be copied. If the transcoding policy does not require any stream to be transcoded and does not require the video to be remuxed, then no separate video file will be created.
The default policy is `required`.
#### All videos (`all`) {#ffmpeg.transcode-all}
Videos are always transcoded. This ensures consistency during video playback.
#### Don't transcode any videos (`disabled`) {#ffmpeg.transcode-disabled}
Videos are never transcoded. This saves space and resources on the server, but may prevent playback on devices that don't support the source format (especially web browsers) or result in high bandwidth usage when playing high-bitrate files.
#### Only videos not in an accepted format (`required`) {#ffmpeg.transcode-required}
Video streams are transcoded when any of the following conditions are met:
- The video is HDR.
- The video is not in the yuv420p pixel format.
- The video codec is not in `acceptedVideoCodecs`.
Audio is transcoded if the audio codec is not in `acceptedAudioCodecs`.
#### Videos higher than max bitrate or not in an accepted format (`bitrate`) {#ffmpeg.transcode-bitrate}
In addition to the conditions in `required`, video streams are also transcoded if their bitrate is over `maxBitrate`.
#### Videos higher than target resolution or not in an accepted format (`optimal`) {#ffmpeg.transcode-optimal}
In addition to the conditions in `required`, video streams are also transcoded if the horizontal **and** vertical dimensions are higher than [`targetResolution`](#ffmpeg.targetResolution).
The system administrator can define parameters according to which video files will be converted to different formats (depending on the settings). The settings can be changed in depth, to learn more about the terminology used here, refer to FFmpeg documentation for [H.264](https://trac.ffmpeg.org/wiki/Encode/H.264) codec, [HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) codec and [VP9](https://trac.ffmpeg.org/wiki/Encode/VP9) codec.

@@ -41,7 +41,7 @@ The system administrator can see the usage quota percentage of all users in Serv
External libraries don't take up space from the storage quota.
:::
<img src={require('./img/user-quota-size.webp').default} width="40%" title="Set Quota Size" />
<img src={require('./img/user-quota-size.png').default} width="40%" title="Set Quota Size" />
## Set Storage Label For User
@@ -51,13 +51,13 @@ To apply a storage template, go to the Administration page -> click on the penci
To apply the Storage Label to previously uploaded assets, run the Storage Migration Job.
:::
<img src={require('./img/user-storage-label.webp').default} width="40%" title="Delete User" />
<img src={require('./img/user-storage-label.png').default} width="40%" title="Delete User" />
## Password Reset
To reset a user's password, click the pencil icon to edit a user, then click "Reset Password". The user's password will be reset to a random password, and they will have to change it the next time they sign in.
<img src={require('./img/user-management-update.webp').default} width="40%" title="Reset Password" />
<img src={require('./img/user-management-update.png').default} width="40%" title="Reset Password" />
## Delete a User
@@ -72,7 +72,7 @@ You can customize the time of the deletion of the users from the Administration
The user deletion job runs at midnight to check for users that are ready for deletion. Changes to this setting will be evaluated at the next execution.
:::
<img src={require('./img/customize-delete-user.webp').default} width="80%" title="Customize Delete User" />
<img src={require('./img/customize-delete-user.png').default} width="80%" title="Customize Delete User" />
### Immediately Remove User
@@ -80,4 +80,4 @@ You can choose to delete a user immediately by checking the box
`Queue user and assets for immediate deletion` in the deletion process. This will immediately remove the user and all of their assets.
This cannot be undone and the files cannot be recovered.
<img src={require('./img/immediately-remove-user.webp').default} width="40%" title="Customize Delete User" />
<img src={require('./img/immediately-remove-user.png').default} width="40%" title="Customize Delete User" />

@@ -2,7 +2,7 @@
sidebar_position: 1
---
import AppArchitecture from './img/app-architecture.webp';
import AppArchitecture from './img/app-architecture.png';
import MobileArchitecture from './img/immich_mobile_architecture.svg';
# Architecture

@@ -15,7 +15,7 @@ Our [GitHub Repository](https://github.com/immich-app/immich) is a [monorepo](ht
| `design/` | Screenshots and logos for the README |
| `docs/` | Source code for the [https://immich.app](https://immich.app) website |
| `machine-learning/` | Source code for the `immich-machine-learning` docker image |
| `misc/release/` | Scripts for version bumps and draft releases |
| `misc/release/` | Scripts for version pumps and draft releases |
| `mobile/` | Source code for the mobile app, both Android and iOS |
| `server/` | Source code for the `immich-server` docker image |
| `web/` | Source code for the `web` |

@@ -1,9 +1,5 @@
# PR Checklist
A minimal devcontainer is supplied with this repository. All commands can be executed directly inside this container to avoid tedious installation of the environment.
:::warning
The provided devcontainer isn't complete at the moment. At a minimum, the dockerized steps in the Makefile (`make dev`, ...) won't work. Feel free to contribute!
:::
When contributing code through a pull request, please check the following:
## Web Checks
@@ -11,7 +7,6 @@ When contributing code through a pull request, please check the following:
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check:svelte` (Type checking via SvelteKit)
- [ ] `npm run check:typescript` (check typescript)
- [ ] `npm test` (unit tests)
## Documentation

@@ -39,16 +39,13 @@ All the services are packaged to run as with single Docker Compose command.
make dev # required Makefile installed on the system.
```
5. Access the dev instance in your browser at http://localhost:3000, or connect via the mobile app.
5. Access the dev instance in your browser at http://localhost:2283, or connect via the mobile app.
All the services will be started with hot-reloading enabled for a quick feedback loop.
You can access the web from `http://your-machine-ip:3000` or `http://localhost:3000` and access the server from the mobile app at `http://your-machine-ip:3000/api`
You can access the web from `http://your-machine-ip:2283` or `http://localhost:2283` and access the server from the mobile app at `http://your-machine-ip:2283/api`
**Notes:**
- The "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors
- In case of rootless docker setup, you need to use root within the container, otherwise you will encounter read/write permission related errors, see comments in `docker/docker-compose.dev.yml`.
**Note:** the "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors
#### Connect web to a remote backend
@@ -63,17 +60,6 @@ If you only want to do web development connected to an existing, remote backend,
IMMICH_SERVER_URL=https://demo.immich.app/ npm run dev
```
#### `@immich/ui`
To see local changes to `@immich/ui` in Immich, do the following:
1. Install `@immich/ui` as a sibling to `immich/`, for example `/home/user/immich` and `/home/user/ui`
1. Build the `@immich/ui` project via `npm run build`
1. Uncomment the corresponding volume in web service of the `docker/docker-compose.dev.yaml` file (`../../ui:/usr/ui`)
1. Uncomment the corresponding alias in the `web/vite.config.js` file (`'@immich/ui': path.resolve(__dirname, '../../ui')`)
1. Start up the stack via `make dev`
1. After making changes in `@immich/ui`, rebuild it (`npm run build`)
### Mobile app
The mobile app (`/mobile`) requires the Flutter toolchain 3.13.x to be installed on your system.
@@ -90,7 +76,7 @@ Setting these in the IDE give a better developer experience, auto-formatting cod
### Dart Code Metrics
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/) page for more information on setting up DCM
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/getting-started/#installation) page for more information on setting up DCM
Note: Activating the license is not required.

@@ -1,14 +1,8 @@
# Automatic Backup
Immich supports uploading photos and videos from your mobile device to the server automatically.
A guide on how the foreground and background automatic backup works.
---
You can enable the settings by accessing the upload options from the upload page
<img src={require('./img/backup-settings-access.webp').default} width="50%" title="Backup option selection" />
<img src={require('./img/background-foreground-backup.webp').default} width="50%" title="Foreground&Background Backup" />
<img src={require('./img/background-foreground-backup.png').default} width="50%" title="Foreground&Background Backup" />
## Foreground backup
@@ -16,9 +10,9 @@ If foreground backup is enabled: whenever the app is opened or resumed, it will
## Background backup
This feature is intended for everyday use. For initial bulk uploading, please use the foreground upload feature. For more information on why background upload is not working as expected, please refer to the [FAQ](/docs/FAQ#why-does-foreground-backup-stop-when-i-navigate-away-from-the-app-shouldnt-it-transfer-the-job-to-background-backup).
Background backup is available thanks to the contribution effort of [@fyfrey](https://github.com/fyfrey) and [@martyfuhry](https://github.com/martyfuhry).
If background backup is enabled. The app will periodically check if there are any new photos or videos in the selected album(s) to be uploaded to the server. If there are, it will upload them to the cloud in the background.
If background backup is enabled. The app will periodically check if there are any new photos or videos in the selected album(s) to be uploaded to the cloud. If there are, it will upload them to the cloud in the background.
:::info Note
@@ -36,7 +30,7 @@ If background backup is enabled. The app will periodically check if there are an
- You must enable **Background App Refresh** for the app to work in the background. You can enable it in the Settings app under General > Background App Refresh.
<div style={{textAlign: 'center'}}>
<img src={require('./img/background-app-refresh.webp').default} width="30%" title="background-app-refresh" />
<img src={require('./img/background-app-refresh.png').default} width="30%" title="background-app-refresh" />
</div>
:::

@@ -170,4 +170,4 @@ immich upload --include-hidden --recursive directory/
The API key can be obtained in the user setting panel on the web interface.
![Obtain Api Key](./img/obtain-api-key.webp)
![Obtain Api Key](./img/obtain-api-key.png)
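As a rough sketch, the key can then be used to authenticate the CLI before uploading; the `login` subcommand, server URL, and key placeholder below are assumptions, so check `immich --help` for your CLI version:

```bash
# Authenticate once against your server with the API key from the web UI (assumed command form)
immich login https://your-immich-server/api YOUR_API_KEY

# Then upload as usual
immich upload --recursive directory/
```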

@@ -6,15 +6,15 @@ Immich recognizes faces in your photos and videos and groups them together into
The list of people is shown in the Explore page.
<img src={require('./img/facial-recognition-1.webp').default} title='Facial Recognition 1' />
<img src={require('./img/facial-recognition-1.png').default} title='Facial Recognition 1' />
Upon clicking on a person, a list of assets that contain their face will be shown.
<img src={require('./img/facial-recognition-2.webp').default} title='Facial Recognition 2' />
<img src={require('./img/facial-recognition-2.png').default} title='Facial Recognition 2' />
The asset detail view will also show the faces that are recognized in the asset.
<img src={require('./img/facial-recognition-3.webp').default} title='Facial Recognition 3' />
<img src={require('./img/facial-recognition-3.png').default} title='Facial Recognition 3' />
## Actions
@@ -28,7 +28,7 @@ Additional actions you can do include:
It can be found from the app bar when you access the detail view of a person.
<img src={require('./img/facial-recognition-4.webp').default} title='Facial Recognition 4' width="70%"/>
<img src={require('./img/facial-recognition-4.png').default} title='Facial Recognition 4' width="70%"/>
## How Face Detection Works
@@ -77,7 +77,7 @@ There are a few different models available; the default is typically considered
### Minimum detection score
This setting affects whether a result from the face detection model is filtered out as a false positive. It may seem tempting to set this low to detect more faces, but it can lead to false positives that are difficult to deal with and can harm facial recognition. It is strongly recommended not to go below 0.5 for this setting. Setting it to a very high number like 0.9 is also not recommended: the default is already biased toward precision, so a threshold that high leads to many undetected faces.
This setting affects whether a result from the face detecton model is filtered out as a false positive. It may seem tempting to set this low to detect more faces, but it can lead to false positives that are difficult to deal with and can harm facial recognition. It is strongly recommended not to go below 0.5 for this setting. Setting it to a very high number like 0.9 is also not recommended: the default is already biased toward precision, so a threshold that high leads to many undetected faces.
After changing this setting, it will only apply to new face detection jobs. To apply the new setting to all assets, you need to re-run face detection for all assets.

@@ -1,17 +0,0 @@
# Folder View
Folder view provides an additional view besides the timeline that is similar to a file explorer. It allows you to navigate through the folders and files in the library. This feature is handy for a highly curated and customized external library or a nicely configured storage template.
You can enable this feature under [`Account Settings > Features > Folder View`](https://my.immich.app/user-settings?isOpen=feature+folders)
## Enable folder view
<img src={require('./img/folder-view-enable.webp').default} width="80%" title='Folder view enable' />
## Usage
You can then navigate to the view from the sidebar to explore the folders and files in your library.
<img src={require('./img/folder-access.webp').default} width="30%" title='Folder view access' />
<img src={require('./img/folder-view-1.webp').default} width="100%" title='Folder view' />

@@ -1,7 +1,7 @@
# Hardware Transcoding
# Hardware Transcoding [Experimental]
This feature allows you to use a GPU to accelerate transcoding and reduce CPU load.
Note that hardware transcoding produces significantly larger videos than software transcoding with similar settings, typically with lower quality. Using slow presets and preferring more efficient codecs can narrow this gap.
Note that hardware transcoding is much less efficient for file sizes.
As this is a new feature, it is still experimental and may not work on all systems.
:::info
@@ -23,7 +23,7 @@ You do not need to redo any transcoding jobs after enabling hardware acceleratio
- Raspberry Pi is currently not supported.
- Two-pass mode is only supported for NVENC. Other APIs will ignore this setting.
- By default, only encoding is currently hardware accelerated. This means the CPU is still used for software decoding and tone-mapping.
- You can benefit from end-to-end acceleration by enabling hardware decoding in the video transcoding settings.
- NVENC and RKMPP can be fully accelerated by enabling hardware decoding in the video transcoding settings.
- Hardware dependent
- Codec support varies, but H.264 and HEVC are usually supported.
- Notably, NVIDIA and AMD GPUs do not support VP9 encoding.
@@ -49,7 +49,7 @@ For RKMPP to work:
- You must have a supported Rockchip ARM SoC.
- Only RK3588 supports hardware tonemapping; other SoCs use slower software tonemapping while still using hardware encoding.
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install the [`libmali`][libmali-rockchip] release that corresponds to your Mali GPU (`libmali-valhall-g610-g13p0-gbm` on RK3588) and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install [`libmali-valhall-g610-g6p0-gbm`][libmali-rockchip] and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- under `rkmpp` uncomment the 3 lines required for OpenCL tonemapping by removing the `#` symbol at the beginning of each line
- `- /dev/mali0:/dev/mali0`
- `- /etc/OpenCL:/etc/OpenCL:ro`
@@ -62,14 +62,11 @@ For RKMPP to work:
1. If you do not already have it, download the latest [`hwaccel.transcoding.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-server`, uncomment the `extends` section and change `cpu` to the appropriate backend.
Note: For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
- For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
3. Redeploy the `immich-server` container with these updated settings.
4. In the Admin page under `Video transcoding settings`, change the hardware acceleration setting to the appropriate option and save.
Note: For Jasper Lake and Elkhart Lake CPUs, you will need to set the `Hardware Acceleration` -> `Constant quality mode` to `CQP`
5. (Optional) Enable hardware decoding for optimal performance.
5. (Optional) If using a compatible backend, you may enable hardware decoding for optimal performance.
#### Single Compose File
@@ -92,7 +89,16 @@ immich-server:
devices:
- /dev/dri:/dev/dri
volumes:
...
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- 2283:3001
depends_on:
- redis
- database
restart: always
```
Once this is done, you can continue to step 3 of "Basic Setup".
