Compare commits


1 Commit

Author: Alex Tran
SHA1: ad3e92fff0
Message: feat(server): license verification
Date: 2024-06-05 01:56:37 -05:00
2228 changed files with 84909 additions and 200330 deletions

View File

@@ -1,2 +0,0 @@
.env
library

View File

@@ -1,16 +0,0 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.24.5"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
&& curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
&& tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
&& rm flutter.tar.xz \
&& chown -R 1000:1000 ${FLUTTER_HOME}

View File

@@ -1,26 +0,0 @@
{
"name": "Immich",
"service": "immich-devcontainer",
"dockerComposeFile": [
"docker-compose.yml",
"../docker/docker-compose.dev.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dbaeumer.vscode-eslint",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
"onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
"overrideCommand": true,
"workspaceFolder": "/immich",
"remoteUser": "node"
}

View File

@@ -1,8 +0,0 @@
services:
immich-devcontainer:
build:
dockerfile: Dockerfile
extra_hosts:
- 'host.docker.internal:host-gateway'
volumes:
- ..:/immich:cached

View File

@@ -1,6 +0,0 @@
#!/bin/bash
# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
cp docker/example.env .devcontainer/.env
fi

View File

@@ -1,25 +0,0 @@
#!/bin/bash
# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
sudo dpkg --add-architecture amd64 && \
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
qemu-user-static \
libc6:amd64 \
libstdc++6:amd64 \
libgcc1:amd64
fi
# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list
sudo apt-get update
sudo apt-get install dcm
dart --disable-analytics
# Install immich
cd /immich || exit
make install-all

View File

@@ -4,7 +4,6 @@
design/
docker/
!docker/scripts
docs/
e2e/
fastlane/
@@ -22,7 +21,6 @@ open-api/typescript-sdk/node_modules/
server/coverage/
server/node_modules/
server/upload/
server/src/queries
server/dist/
server/www/
@@ -30,4 +28,3 @@ web/node_modules/
web/coverage/
web/.svelte-kit
web/build/
web/.env

View File

@@ -1,13 +1,11 @@
title: "[Feature] feature-name-goes-here"
title: "[Feature] <feature-name-goes-here>"
labels: ["feature"]
body:
- type: markdown
attributes:
value: |
Please use this form to request new feature for Immich.
Stick to only a single feature per request. If you list multiple different features at once,
your request will be closed.
Please use this form to request new feature for Immich
- type: checkboxes
attributes:

.github/FUNDING.yml vendored
View File

@@ -1 +0,0 @@
custom: ['https://buy.immich.app']

View File

@@ -83,6 +83,7 @@ body:
2.
3.
...
render: bash
validations:
required: true

View File

@@ -1,14 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: I have a question or need support
url: https://discord.immich.app
- name: I have a question or need support
url: https://discord.gg/D8JsnBEuKb
about: We use GitHub for tracking bugs, please check out our Discord channel for freaky fast support.
- name: 📷 My photo or video has a date, time, or timezone problem
url: https://github.com/immich-app/immich/discussions/12650
about: Upload a sample file to this discussion and we will take a look
- name: 🌟 Feature request
- name: Feature Request
url: https://github.com/immich-app/immich/discussions/new?category=feature-request
about: Please use our GitHub Discussion for making feature requests.
- name: 🫣 I'm unsure where to go
url: https://discord.immich.app
- name: I'm unsure where to go
url: https://discord.gg/D8JsnBEuKb
about: If you are unsure where to go, then joining our Discord is recommended; Just ask!

.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/labeler.yml vendored
View File

@@ -1,38 +0,0 @@
cli:
- changed-files:
- any-glob-to-any-file:
- cli/src/**
documentation:
- changed-files:
- any-glob-to-any-file:
- docs/blob/**
- docs/docs/**
- docs/src/**
- docs/static/**
🖥web:
- changed-files:
- any-glob-to-any-file:
- web/src/**
- web/static/**
📱mobile:
- changed-files:
- any-glob-to-any-file:
- mobile/lib/**
- mobile/test/**
🗄server:
- changed-files:
- any-glob-to-any-file:
- server/src/**
- server/test/**
🧠machine-learning:
- changed-files:
- any-glob-to-any-file:
- machine-learning/app/**
changelog:translation:
- head-branch: ['^chore/translations$']

.github/release.yml vendored
View File

@@ -1,33 +1,41 @@
changelog:
categories:
- title: 🚨 Breaking Changes
- title: ⚠️ Breaking Changes
labels:
- changelog:breaking-change
- breaking-change
- title: 🫥 Deprecated Changes
- title: 🗄️ Server
labels:
- changelog:deprecated
- 🗄server
- title: 🔒 Security
- title: 📱 Mobile
labels:
- changelog:security
- 📱mobile
- title: 🚀 Features
- title: 🖥️ Web
labels:
- changelog:feature
- 🖥web
- title: 🌟 Enhancements
- title: 🧠 Machine Learning
labels:
- changelog:enhancement
- 🧠machine-learning
- title: 🐛 Bug fixes
- title: ⚡ CLI
labels:
- changelog:bugfix
- cli
- title: 📚 Documentation
- title: 📓 Documentation
labels:
- changelog:documentation
- documentation
- title: 🌐 Translations
- title: 🔨 Maintenance
labels:
- changelog:translation
- deployment
- dependencies
- renovate
- maintenance
- tech-debt
- title: Other changes
labels:
- "*"

View File

@@ -16,28 +16,10 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android
needs: pre-job
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_run == 'true' }}
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }}
runs-on: macos-14
steps:

View File

@@ -1,17 +1,16 @@
name: CLI Build
on:
workflow_dispatch:
push:
branches: [main]
paths:
- 'cli/**'
- '.github/workflows/cli.yml'
- "cli/**"
- ".github/workflows/cli.yml"
pull_request:
branches: [main]
paths:
- 'cli/**'
- '.github/workflows/cli.yml'
release:
types: [published]
- "cli/**"
- ".github/workflows/cli.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -22,7 +21,7 @@ permissions:
jobs:
publish:
name: CLI Publish
name: Publish
runs-on: ubuntu-latest
defaults:
run:
@@ -33,8 +32,8 @@ jobs:
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
registry-url: 'https://registry.npmjs.org'
node-version: "20.x"
registry-url: "https://registry.npmjs.org"
- name: Prepare SDK
run: npm ci --prefix ../open-api/typescript-sdk/
- name: Build SDK
@@ -42,7 +41,7 @@ jobs:
- run: npm ci
- run: npm run build
- run: npm publish
if: ${{ github.event_name == 'release' }}
if: ${{ github.event_name == 'workflow_dispatch' }}
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
@@ -56,10 +55,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
uses: docker/setup-qemu-action@v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.3.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -84,15 +83,15 @@ jobs:
images: |
name=ghcr.io/${{ github.repository_owner }}/immich-cli
tags: |
type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'release' }}
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
type=raw,value=${{ steps.package-version.outputs.version }},enable=${{ github.event_name == 'workflow_dispatch' }}
type=raw,value=latest,enable=${{ github.event_name == 'workflow_dispatch' }}
- name: Build and push image
uses: docker/build-push-action@v6.12.0
uses: docker/build-push-action@v5.3.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name == 'release' }}
push: ${{ github.event_name == 'workflow_dispatch' }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.metadata.outputs.tags }}

View File

@@ -22,7 +22,7 @@ concurrency:
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
@@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@@ -48,7 +48,7 @@ jobs:
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
needs:
- cleanup-images
strategy:
@@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
uses: stumpylog/image-cleaner-action/untagged@v0.7.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"

View File

@@ -17,206 +17,56 @@ permissions:
packages: write
jobs:
pre-job:
build_and_push:
name: Build and Push
runs-on: ubuntu-latest
outputs:
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
server:
- 'server/**'
- 'openapi/**'
- 'web/**'
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ["", "-cuda", "-openvino", "-armnn"]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: [""]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ubuntu-latest
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- platforms: linux/amd64,linux/arm64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64,linux/arm64
device: cpu
- platforms: linux/amd64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
device: cuda
suffix: -cuda
- platforms: linux/amd64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
device: openvino
suffix: -openvino
- platforms: linux/arm64
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/arm64
device: armnn
suffix: -armnn
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.12.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
build_and_push_server:
name: Build and Push Server
runs-on: ubuntu-latest
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
strategy:
fail-fast: false
matrix:
include:
- platforms: linux/amd64,linux/arm64
- image: immich-server
context: .
file: server/Dockerfile
platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0
uses: docker/setup-qemu-action@v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.8.0
uses: docker/setup-buildx-action@v3.3.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@@ -243,8 +93,8 @@ jobs:
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
name=ghcr.io/${{ github.repository_owner }}/${{matrix.image}}
name=altran1502/${{matrix.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
@@ -261,50 +111,20 @@ jobs:
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ matrix.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.12.0
uses: docker/build-push-action@v5.3.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
context: ${{ matrix.context }}
file: ${{ matrix.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{matrix.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
success-check-server:
name: Docker Build & Push Server Success
needs: [build_and_push_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
success-check-ml:
name: Docker Build & Push ML Success
needs: [build_and_push_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
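
For reference, the re-tag jobs above reduce to a single `imagetools` invocation once the expressions are expanded. A sketch with hypothetical values (PR number 1234, the ML image, no device suffix); the command shape is taken verbatim from the workflow's "Re-tag image" step:

```bash
# Hypothetical expansion of the "Re-tag image" step for a PR build:
docker buildx imagetools create \
  -t ghcr.io/immich-app/immich-machine-learning:pr-1234 \
  ghcr.io/immich-app/immich-machine-learning:main
```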

View File

@@ -2,8 +2,12 @@ name: Docs build
on:
push:
branches: [main]
paths:
- "docs/**"
pull_request:
branches: [main]
paths:
- "docs/**"
release:
types: [published]
@@ -12,27 +16,7 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
docs:
- 'docs/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@@ -42,11 +26,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './docs/.nvmrc'
- name: Run npm install
run: npm ci

View File

@@ -7,32 +7,13 @@ on:
jobs:
checks:
name: Docs Deploy Checks
runs-on: ubuntu-latest
outputs:
parameters: ${{ steps.parameters.outputs.result }}
artifact: ${{ steps.get-artifact.outputs.result }}
steps:
- if: ${{ github.event.workflow_run.conclusion != 'success' }}
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
if (!matchArtifact) {
console.log("No artifact found with the name docs-build-output, build job was skipped")
return { found: false };
}
return { found: true, id: matchArtifact.id };
- if: ${{ github.event.workflow_run.conclusion == 'failure' }}
run: echo 'The triggering workflow failed' && exit 1
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@v7
@@ -92,10 +73,9 @@ jobs:
return parameters;
deploy:
name: Docs Deploy
runs-on: ubuntu-latest
needs: checks
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
if: ${{ fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -118,11 +98,18 @@ jobs:
uses: actions/github-script@v7
with:
script: |
let artifact = ${{ needs.checks.outputs.artifact }};
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: artifact.id,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
let fs = require('fs');

View File

@@ -5,7 +5,6 @@ on:
jobs:
deploy:
name: Docs Destroy
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -23,7 +22,7 @@ jobs:
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy -refresh=false"
tg_command: "destroy"
- name: Comment
uses: actions-cool/maintain-one-comment@v3

View File

@@ -1,52 +0,0 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})

View File

@@ -1,22 +0,0 @@
name: PR Label Validation
on:
pull_request_target:
types: [opened, labeled, unlabeled, synchronize]
jobs:
validate-release-label:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 1
use_regex: true
labels: "changelog:.*"
add_comment: true
message: "Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label."

View File

@@ -1,12 +0,0 @@
name: "Pull Request Labeler"
on:
- pull_request_target
jobs:
labeler:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5

View File

@@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@1.3.0
uses: ytanikin/PRConventionalCommits@1.2.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'

.github/workflows/pr-require-label.yml vendored Normal file
View File

@@ -0,0 +1,13 @@
name: Enforce PR labels
on:
pull_request:
types: [labeled, unlabeled, opened, edited, synchronize]
jobs:
enforce-label:
name: Enforce label
runs-on: ubuntu-latest
steps:
- if: toJson(github.event.pull_request.labels) == '[]'
run: exit 1

View File

@@ -29,17 +29,10 @@ jobs:
ref: ${{ steps.push-tag.outputs.commit_long_sha }}
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
token: ${{ secrets.ORG_RELEASE_TOKEN }}
- name: Install Poetry
run: pipx install poetry
@@ -51,8 +44,10 @@ jobs:
id: push-tag
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
author_name: Alex The Bot
author_email: alex.tran1502@gmail.com
default_author: user_info
message: 'Version ${{ env.IMMICH_VERSION }}'
tag: ${{ env.IMMICH_VERSION }}
push: true
@@ -68,17 +63,10 @@ jobs:
needs: build_mobile
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
token: ${{ secrets.ORG_RELEASE_TOKEN }}
- name: Download APK
uses: actions/download-artifact@v4

View File

@@ -19,7 +19,7 @@ jobs:
# Setup .npmrc file to publish to npm
- uses: actions/setup-node@v4
with:
node-version-file: './open-api/typescript-sdk/.nvmrc'
node-version: '20.x'
registry-url: 'https://registry.npmjs.org'
- name: Install deps
run: npm ci

View File

@@ -10,27 +10,8 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
@@ -56,10 +37,6 @@ jobs:
run: dart format lib/ --set-exit-if-changed
working-directory: ./mobile
- name: Run dart custom_lint
run: dart run custom_lint
working-directory: ./mobile
# Enable after riverpod generator migration is completed
# - name: Run dart custom lint
# run: dart run custom_lint

View File

@@ -10,48 +10,8 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run_web: ${{ steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_cli: ${{ steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e: ${{ steps.found_paths.outputs.e2e == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_mobile: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
web:
- 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**'
server:
- 'server/**'
cli:
- 'cli/**'
- 'open-api/typescript-sdk/**'
e2e:
- 'e2e/**'
mobile:
- 'mobile/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests:
name: Test & Lint Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
name: Server
runs-on: ubuntu-latest
defaults:
run:
@@ -61,11 +21,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Run npm install
run: npm ci
@@ -81,14 +36,12 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
- name: Run small tests & coverage
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
cli-unit-tests:
name: Unit Test CLI
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
name: CLI
runs-on: ubuntu-latest
defaults:
run:
@@ -101,7 +54,7 @@ jobs:
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
node-version: 20
- name: Setup typescript-sdk
run: npm ci && npm run build
@@ -126,44 +79,8 @@ jobs:
run: npm run test:cov
if: ${{ !cancelled() }}
cli-unit-tests-win:
name: Unit Test CLI (Windows)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
runs-on: windows-latest
defaults:
run:
working-directory: ./cli
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './cli/.nvmrc'
- name: Setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
- name: Install deps
run: npm ci
# Skip linter & formatter in Windows test.
- name: Run tsc
run: npm run check
if: ${{ !cancelled() }}
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
web-unit-tests:
name: Test & Lint Web
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_web == 'true' }}
name: Web
runs-on: ubuntu-latest
defaults:
run:
@@ -173,11 +90,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './web/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
@@ -205,10 +117,8 @@ jobs:
run: npm run test:cov
if: ${{ !cancelled() }}
e2e-tests-lint:
name: End-to-End Lint
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e == 'true' }}
e2e-tests:
name: End-to-End Tests
runs-on: ubuntu-latest
defaults:
run:
@@ -217,17 +127,24 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
node-version: 20
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Run setup cli
run: npm ci && npm run build
working-directory: ./cli
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
@@ -244,58 +161,8 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_server_cli == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Run setup cli
run: npm ci && npm run build
working-directory: ./cli
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
- name: Install Playwright Browsers
run: npx playwright install --with-deps chromium
if: ${{ !cancelled() }}
- name: Docker build
@@ -306,51 +173,12 @@ jobs:
run: npm run test
if: ${{ !cancelled() }}
e2e-tests-web:
name: End-to-End Tests (Web)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_web == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
- name: Install Playwright Browsers
run: npx playwright install --with-deps chromium
if: ${{ !cancelled() }}
- name: Docker build
run: docker compose build
if: ${{ !cancelled() }}
- name: Run e2e tests (web)
run: npx playwright test
if: ${{ !cancelled() }}
mobile-unit-tests:
name: Unit Test Mobile
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
name: Mobile
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -364,9 +192,7 @@ jobs:
run: flutter test -j 1
ml-unit-tests:
name: Unit Test ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
name: Machine Learning
runs-on: ubuntu-latest
defaults:
run:
@@ -415,11 +241,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Install server dependencies
run: npm --prefix=server ci
@@ -470,11 +291,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Install server dependencies
run: npm ci

.gitignore vendored
View File

@@ -21,5 +21,3 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*

.gitmodules vendored
View File

@@ -1,6 +1,6 @@
[submodule "mobile/.isar"]
path = mobile/.isar
url = https://github.com/isar/isar
[submodule "e2e/test-assets"]
[submodule "server/test/assets"]
path = e2e/test-assets
url = https://github.com/immich-app/test-assets
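
A side note for anyone applying this `.gitmodules` change to an existing checkout: the usual follow-up is a sync plus update (standard git commands, not part of the diff):

```bash
git submodule sync --recursive           # re-read the updated .gitmodules mapping
git submodule update --init --recursive  # fetch and check out the submodule contents
```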

.vscode/launch.json vendored
View File

@@ -5,8 +5,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9231,
"name": "Immich API Server",
"port": 9230,
"name": "Immich Server",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
},
@@ -14,8 +14,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9230,
"name": "Immich Workers",
"port": 9231,
"name": "Immich Microservices",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
}

View File

@@ -41,4 +41,4 @@
"explorer.fileNesting.patterns": {
"*.ts": "${capture}.spec.ts,${capture}.mock.ts"
}
}
}

View File

@@ -131,4 +131,4 @@ conduct enforcement ladder](https://github.com/mozilla/diversity).
For answers to common questions about this code of conduct, see the
FAQ at https://www.contributor-covenant.org/faq. Translations are
available at https://www.contributor-covenant.org/translations.
available at https://www.contributor-covenant.org/translations.

View File

@@ -10,6 +10,12 @@ dev-update:
dev-scale:
docker compose -f ./docker/docker-compose.dev.yml up --build -V --scale immich-server=3 --remove-orphans
stage:
docker compose -f ./docker/docker-compose.staging.yml up --build -V --remove-orphans
pull-stage:
docker compose -f ./docker/docker-compose.staging.yml pull
.PHONY: e2e
e2e:
docker compose -f ./e2e/docker-compose.yml up --build -V --remove-orphans
@@ -35,61 +41,3 @@ sql:
attach-server:
docker exec -it docker_immich-server_1 sh
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk docs
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
install-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) i
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build
format-%:
npm --prefix $* run format:fix
lint-%:
npm --prefix $* run lint:fix
check-%:
npm --prefix $* run check
check-web:
npm --prefix web run check:typescript
npm --prefix web run check:svelte
test-%:
npm --prefix $* run test
test-e2e:
docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(filter-out e2e,$(MODULES)),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
find . -name "dist" -type d -prune -exec rm -rf '{}' +
find . -name "build" -type d -prune -exec rm -rf '{}' +
find . -name "svelte-kit" -type d -prune -exec rm -rf '{}' +
docker compose -f ./docker/docker-compose.dev.yml rm -v -f || true
docker compose -f ./e2e/docker-compose.yml rm -v -f || true
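
As a reading aid for the pattern rules above, here is a minimal, self-contained sketch of the same idiom (an illustration, not part of the Makefile). `$(subst sdk,open-api/typescript-sdk,$*)` rewrites the stem only when it contains `sdk`, so `build-sdk` operates on `open-api/typescript-sdk` while every other module uses its own directory; recipe lines must be tab-indented:

```make
MODULES = server web sdk

install-%:
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) i

build-%: install-%
	npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build

# `make build-sdk` therefore runs:
#   npm --prefix open-api/typescript-sdk i
#   npm --prefix open-api/typescript-sdk run build
install-all: $(foreach M,$(MODULES),install-$M) ;
```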

View File

@@ -1,7 +1,7 @@
<p align="center">
<br/>
<a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a>
<a href="https://discord.immich.app">
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
@@ -17,8 +17,8 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
@@ -29,13 +29,9 @@
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_uk_UA.md">Українська</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
</p>
## Disclaimer
@@ -45,36 +41,45 @@
- ⚠️ **Do not use the app as the only way to store your photos and videos.**
- ⚠️ Always follow [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) backup plan for your precious photos and videos!
> [!NOTE]
> You can find the main documentation, including installation guides, at https://immich.app/.
## Content
## Links
- [Documentation](https://immich.app/docs)
- [About](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Roadmap](https://immich.app/roadmap)
- [Official Documentation](https://immich.app/docs)
- [Roadmap](https://github.com/orgs/immich-app/projects/1)
- [Demo](#demo)
- [Features](#features)
- [Translations](https://immich.app/docs/developer/translations)
- [Contributing](https://immich.app/docs/overview/support-the-project)
- [Introduction](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Contribution Guidelines](https://immich.app/docs/overview/support-the-project)
## Documentation
You can find the main documentation, including installation guides, at https://immich.app/.
## Demo
Access the demo [here](https://demo.immich.app). The demo is running on a Free-tier Oracle VM in Amsterdam with a 2.4Ghz quad-core ARM64 CPU and 24GB RAM.
You can access the web demo at https://demo.immich.app
For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`
### Login credentials
```bash title="Demo Credential"
The credential
email: demo@immich.app
password: demo
```
| Email | Password |
| --------------- | -------- |
| demo@immich.app | demo |
```
Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
```
## Activities
![Activities](https://repobeats.axiom.co/api/embed/9e86d9dc3ddd137161f2f6d2e758d7863b1789cb.svg "Repobeats analytics image")
## Features
| Features | Mobile | Web |
| :------------------------------------------- | ------ | --- |
| :--------------------------------------------- | -------- | ----- |
| Upload and view videos and photos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Prevent duplication of assets | Yes | Yes |
@@ -94,7 +99,7 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| LivePhoto/MotionPhoto backup and playback | Yes | Yes |
| Support 360 degree image display | No | Yes |
| User-defined storage structure | Yes | Yes |
| Public Sharing | Yes | Yes |
| Public Sharing | No | Yes |
| Archive and Favorites | Yes | Yes |
| Global Map | Yes | Yes |
| Partner Sharing | Yes | Yes |
@@ -103,22 +108,14 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations
## Contributors
Read more about translations [here](https://immich.app/docs/developer/translations).
<a href="https://hosted.weblate.org/engage/immich/">
<img src="https://hosted.weblate.org/widget/immich/immich/multi-auto.svg" alt="Translation status" />
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>
## Repository activity
![Activities](https://repobeats.axiom.co/api/embed/9e86d9dc3ddd137161f2f6d2e758d7863b1789cb.svg "Repobeats analytics image")
## Star history
## Star History
<a href="https://star-history.com/#immich-app/immich&Date">
<picture>
@@ -127,9 +124,3 @@ Read more about translations [here](https://immich.app/docs/developer/translatio
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=immich-app/immich&type=Date" width="100%" />
</picture>
</a>
## Contributors
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>

View File

@@ -2,4 +2,4 @@
## Reporting a Vulnerability
Please report security issues to `security@immich.app`
Please report security issues to `alex.tran1502@gmail.com`

cli/.eslintignore Normal file
View File

@@ -0,0 +1 @@
/dist

cli/.eslintrc.cjs Normal file
View File

@@ -0,0 +1,28 @@
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module',
tsconfigRootDir: __dirname,
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: ['plugin:@typescript-eslint/recommended', 'plugin:prettier/recommended', 'plugin:unicorn/recommended'],
root: true,
env: {
node: true,
},
ignorePatterns: ['.eslintrc.js'],
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
},
};

View File

@@ -1 +1 @@
22.13.1
20.14

View File

@@ -1,4 +1,4 @@
FROM node:22.13.1-alpine3.20@sha256:c52e20859a92b3eccbd3a36c5e1a90adc20617d8d421d65e8a622e87b5dac963 AS core
FROM node:20-alpine3.19@sha256:696ae41fb5880949a15ade7879a2deae93b3f0723f757bdb5b8a9e4a744ce27f as core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./
@@ -16,4 +16,4 @@ RUN npm run build
WORKDIR /import
ENTRYPOINT ["node", "/usr/src/app/dist"]
ENTRYPOINT ["node", "/usr/src/app/dist"]

View File

@@ -4,18 +4,8 @@ Please see the [Immich CLI documentation](https://immich.app/docs/features/comma
# For developers
Before building the CLI, you must build the immich server and the open-api client. To build the server run the following in the server folder:
$ npm install
$ npm run build
Then, to build the open-api client run the following in the open-api folder:
$ ./bin/generate-open-api.sh
To run the Immich CLI from source, run the following in the cli folder:
$ npm install
$ npm run build
$ ts-node .
@@ -27,4 +17,3 @@ You can also build and install the CLI using
$ npm run build
$ npm install -g .
****
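
Collected in one place, the from-source flow this README describes looks roughly like the following; a sketch assuming the repository's standard layout, not an official script:

```bash
# Build the server, generate the open-api client, then build the CLI.
cd server && npm install && npm run build && cd ..
cd open-api && ./bin/generate-open-api.sh && cd ..
cd cli && npm install && npm run build
npm install -g .   # optional: install the built CLI globally
```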

View File

@@ -1,61 +0,0 @@
import { FlatCompat } from '@eslint/eslintrc';
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import globals from 'globals';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: ['eslint.config.mjs', 'dist'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:prettier/recommended',
'plugin:unicorn/recommended',
),
{
plugins: {
'@typescript-eslint': typescriptEslint,
},
languageOptions: {
globals: {
...globals.node,
},
parser: tsParser,
ecmaVersion: 5,
sourceType: 'module',
parserOptions: {
project: 'tsconfig.json',
tsconfigRootDir: __dirname,
},
},
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
'@typescript-eslint/explicit-module-boundary-types': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/no-floating-promises': 'error',
'unicorn/prefer-module': 'off',
'unicorn/prevent-abbreviations': 'off',
'unicorn/no-process-exit': 'off',
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'object-shorthand': ['error', 'always'],
},
},
];

cli/package-lock.json generated (2849 lines changed)

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.47",
"version": "2.2.0",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -13,33 +13,29 @@
"cli"
],
"devDependencies": {
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.8.0",
"@immich/sdk": "file:../open-api/typescript-sdk",
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.10.9",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"byte-size": "^9.0.0",
"@types/node": "^20.3.1",
"@typescript-eslint/eslint-plugin": "^7.0.0",
"@typescript-eslint/parser": "^7.0.0",
"@vitest/coverage-v8": "^1.2.2",
"byte-size": "^8.1.1",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1",
"globals": "^15.9.0",
"eslint-plugin-unicorn": "^53.0.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"prettier-plugin-organize-imports": "^3.2.4",
"typescript": "^5.3.3",
"vite": "^6.0.0",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0",
"vitest-fetch-mock": "^0.4.0",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^1.2.2",
"yaml": "^2.3.1"
},
"scripts": {
@@ -63,10 +59,9 @@
},
"dependencies": {
"fast-glob": "^3.3.2",
"fastq": "^1.17.1",
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.13.1"
"node": "20.14.0"
}
}

View File

@@ -1,201 +0,0 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { describe, expect, it, vi } from 'vitest';
import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';
import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
vi.mock('@immich/sdk');
describe('getAlbumName', () => {
it('should return a non-undefined value', () => {
if (os.platform() === 'win32') {
// This is meaningless for Unix systems.
expect(getAlbumName(String.raw`D:\test\Filename.txt`, {} as UploadOptionsDto)).toBe('test');
}
expect(getAlbumName('D:/parentfolder/test/Filename.txt', {} as UploadOptionsDto)).toBe('test');
});
it('has higher priority to return `albumName` in `options`', () => {
expect(getAlbumName('/parentfolder/test/Filename.txt', { albumName: 'example' } as UploadOptionsDto)).toBe(
'example',
);
});
});
describe('uploadFiles', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const baseUrl = 'http://example.com';
const apiKey = 'key';
const retry = 3;
const fetchMocker = createFetchMock(vi);
beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);
// Defaults
vi.mocked(defaults).baseUrl = baseUrl;
vi.mocked(defaults).headers = { 'x-api-key': apiKey };
fetchMocker.enableMocks();
fetchMocker.resetMocks();
});
it('returns new assets when upload file is successful', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});
it('returns new assets when upload file retry is successful', async () => {
let counter = 0;
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
counter++;
if (counter < retry) {
throw new Error('Network error');
}
return {
status: 200,
body: JSON.stringify({ id: 'fc5621b1-86f6-44a1-9905-403e607df9f5', status: 'created' }),
};
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
]);
});
it('returns new assets when upload file retry is failed', async () => {
fetchMocker.doMockIf(new RegExp(`${baseUrl}/assets$`), () => {
throw new Error('Network error');
});
await expect(uploadFiles([testFilePath], { concurrency: 1 })).resolves.toEqual([]);
});
});
describe('checkForDuplicates', () => {
const testDir = fs.mkdtempSync(path.join(os.tmpdir(), 'test-'));
const testFilePath = path.join(testDir, 'test.png');
const testFileData = 'test';
const testFileChecksum = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'; // SHA1
const retry = 3;
beforeEach(() => {
// Create a test file
fs.writeFileSync(testFilePath, testFileData);
});
it('checks duplicates', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await checkForDuplicates([testFilePath], { concurrency: 1 });
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
{
checksum: testFileChecksum,
id: testFilePath,
},
],
},
});
});
it('returns duplicates when check duplicates is rejected', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Reject,
id: testFilePath,
assetId: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
reason: Reason.Duplicate,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [
{
filepath: testFilePath,
id: 'fc5621b1-86f6-44a1-9905-403e607df9f5',
},
],
newFiles: [],
});
});
it('returns new assets when check duplicates is accepted', async () => {
vi.mocked(checkBulkUpload).mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});
it('returns results when check duplicates retry is successful', async () => {
let mocked = vi.mocked(checkBulkUpload);
for (let i = 1; i < retry; i++) {
mocked = mocked.mockRejectedValueOnce(new Error('Network error'));
}
mocked.mockResolvedValue({
results: [
{
action: Action.Accept,
id: testFilePath,
},
],
});
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [testFilePath],
});
});
it('returns results when check duplicates retry is failed', async () => {
vi.mocked(checkBulkUpload).mockRejectedValue(new Error('Network error'));
await expect(checkForDuplicates([testFilePath], { concurrency: 1 })).resolves.toEqual({
duplicates: [],
newFiles: [],
});
});
});

View File

@@ -1,6 +1,5 @@
import {
Action,
AssetBulkUploadCheckItem,
AssetBulkUploadCheckResult,
AssetMediaResponseDto,
AssetMediaStatus,
@@ -12,12 +11,12 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import os from 'node:os';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
const s = (count: number) => (count === 1 ? '' : 's');
@@ -26,7 +25,7 @@ const s = (count: number) => (count === 1 ? '' : 's');
type AssetBulkUploadCheckResults = Array<AssetBulkUploadCheckResult & { id: string }>;
type Asset = { id: string; filepath: string };
export interface UploadOptionsDto {
interface UploadOptionsDto {
recursive?: boolean;
ignore?: string;
dryRun?: boolean;
@@ -85,93 +84,48 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files;
};
export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) {
console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] };
}
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
const progressBar = new SingleBar(
{ format: 'Checking files | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
progressBar.start(files.length, 0);
const newFiles: string[] = [];
const duplicates: Asset[] = [];
const checkBulkUploadQueue = new Queue<AssetBulkUploadCheckItem[], void>(
async (assets: AssetBulkUploadCheckItem[]) => {
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets } });
try {
// TODO refactor into a queue
for (const items of chunk(files, concurrency)) {
const dto = await Promise.all(items.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })));
const { results } = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
for (const { id: filepath, assetId, action } of results as AssetBulkUploadCheckResults) {
if (action === Action.Accept) {
newFiles.push(filepath);
} else {
// rejects are always duplicates
duplicates.push({ id: assetId as string, filepath });
}
progressBar.increment();
}
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
const results: { id: string; checksum: string }[] = [];
let checkBulkUploadRequests: AssetBulkUploadCheckItem[] = [];
const queue = new Queue<string, AssetBulkUploadCheckItem[]>(
async (filepath: string): Promise<AssetBulkUploadCheckItem[]> => {
const dto = { id: filepath, checksum: await sha1(filepath) };
results.push(dto);
checkBulkUploadRequests.push(dto);
if (checkBulkUploadRequests.length === 5000) {
const batch = checkBulkUploadRequests;
checkBulkUploadRequests = [];
void checkBulkUploadQueue.push(batch);
}
hashProgressBar.increment();
return results;
},
{ concurrency, retry: 3 },
);
for (const item of files) {
void queue.push(item);
}
} finally {
progressBar.stop();
}
await queue.drained();
if (checkBulkUploadRequests.length > 0) {
void checkBulkUploadQueue.push(checkBulkUploadRequests);
}
await checkBulkUploadQueue.drained();
multiBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to verify ${failedTasks.length} file${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}
return { newFiles, duplicates };
};
export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.');
return [];
@@ -205,52 +159,37 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
const newAssets: Asset[] = [];
const queue = new Queue<string, AssetMediaResponseDto>(
async (filepath: string) => {
const stats = statsMap.get(filepath);
if (!stats) {
throw new Error(`Stats not found for ${filepath}`);
}
try {
for (const items of chunk(files, concurrency)) {
await Promise.all(
items.map(async (filepath) => {
const stats = statsMap.get(filepath) as Stats;
const response = await uploadFile(filepath, stats);
const response = await uploadFile(filepath, stats);
newAssets.push({ id: response.id, filepath });
if (response.status === AssetMediaStatus.Duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}
newAssets.push({ id: response.id, filepath });
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
if (response.status === AssetMediaStatus.Duplicate) {
duplicateCount++;
duplicateSize += stats.size ?? 0;
} else {
successCount++;
successSize += stats.size ?? 0;
}
return response;
},
{ concurrency, retry: 3 },
);
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
for (const item of files) {
void queue.push(item);
return response;
}),
);
}
} finally {
uploadProgress.stop();
}
await queue.drained();
uploadProgress.stop();
console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) {
console.log(`Skipped ${duplicateCount} duplicate asset${s(duplicateCount)} (${byteSize(duplicateSize)})`);
}
// Report failures
const failedTasks = queue.tasks.filter((task) => task.status === 'failed');
if (failedTasks.length > 0) {
console.log(`Failed to upload ${failedTasks.length} asset${s(failedTasks.length)}:`);
for (const task of failedTasks) {
console.log(`- ${task.data} - ${task.error}`);
}
}
return newAssets;
};
@@ -407,9 +346,7 @@ const updateAlbums = async (assets: Asset[], options: UploadOptionsDto) => {
}
};
// `filepath` valid format:
// - Windows: `D:\\test\\Filename.txt` or `D:/test/Filename.txt`
// - Unix: `/test/Filename.txt`
export const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
return options.albumName ?? path.basename(path.dirname(filepath));
const getAlbumName = (filepath: string, options: UploadOptionsDto) => {
const folderName = os.platform() === 'win32' ? filepath.split('\\').at(-2) : filepath.split('/').at(-2);
return options.albumName ?? folderName;
};

View File

@@ -1,131 +0,0 @@
import * as fastq from 'fastq';
import { uniqueId } from 'lodash-es';
export type Task<T, R> = {
readonly id: string;
status: 'idle' | 'processing' | 'succeeded' | 'failed';
data: T;
error: unknown | undefined;
count: number;
// TODO: Could be useful to add a progress property.
// TODO: Could be useful to add start_at/end_at/duration properties.
result: undefined | R;
};
export type QueueOptions = {
verbose?: boolean;
concurrency?: number;
retry?: number;
// TODO: Could be useful to add a timeout property for retry.
};
export type ComputedQueueOptions = Required<QueueOptions>;
export const defaultQueueOptions = {
concurrency: 1,
retry: 0,
verbose: false,
};
/**
* An in-memory queue that processes tasks in parallel with a given concurrency.
* @see {@link https://www.npmjs.com/package/fastq}
* @template T - The type of the worker task data.
* @template R - The type of the worker output data.
*/
export class Queue<T, R> {
private readonly queue: fastq.queueAsPromised<string, Task<T, R>>;
private readonly store = new Map<string, Task<T, R>>();
readonly options: ComputedQueueOptions;
readonly worker: (data: T) => Promise<R>;
/**
* Create a new queue.
* @param worker - The worker function that processes the task.
* @param options - The queue options.
*/
constructor(worker: (data: T) => Promise<R>, options?: QueueOptions) {
this.options = { ...defaultQueueOptions, ...options };
this.worker = worker;
this.store = new Map<string, Task<T, R>>();
this.queue = this.buildQueue();
}
get tasks(): Task<T, R>[] {
const tasks: Task<T, R>[] = [];
for (const task of this.store.values()) {
tasks.push(task);
}
return tasks;
}
getTask(id: string): Task<T, R> {
const task = this.store.get(id);
if (!task) {
throw new Error(`Task with id ${id} not found`);
}
return task;
}
/**
* Wait for the queue to be empty.
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to an `unhandledRejection`.
*/
drained(): Promise<void> {
return this.queue.drained();
}
/**
* Add a task at the end of the queue.
* @see {@link https://www.npmjs.com/package/fastq}
* @param data
* @returns Promise<void> - A Promise that will be fulfilled (rejected) when the task is completed successfully (unsuccessfully).
* This promise could be ignored as it will not lead to an `unhandledRejection`.
*/
async push(data: T): Promise<Task<T, R>> {
const id = uniqueId();
const task: Task<T, R> = { id, status: 'idle', error: undefined, count: 0, data, result: undefined };
this.store.set(id, task);
return this.queue.push(id);
}
// TODO: Support more function delegation to fastq.
private buildQueue(): fastq.queueAsPromised<string, Task<T, R>> {
return fastq.promise((id: string) => {
const task = this.getTask(id);
return this.work(task);
}, this.options.concurrency);
}
private async work(task: Task<T, R>): Promise<Task<T, R>> {
task.count += 1;
task.error = undefined;
task.status = 'processing';
if (this.options.verbose) {
console.log('[task] processing:', task);
}
try {
task.result = await this.worker(task.data);
task.status = 'succeeded';
if (this.options.verbose) {
console.log('[task] succeeded:', task);
}
return task;
} catch (error) {
task.error = error;
task.status = 'failed';
if (this.options.verbose) {
console.log('[task] failed:', task);
}
if (this.options.retry > 0 && task.count < this.options.retry) {
if (this.options.verbose) {
console.log('[task] retry:', task);
}
return this.work(task);
}
return task;
}
}
}
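For reference, a minimal usage sketch of the `Queue` class above, mirroring how the upload code drives it; the file names and the concurrency/retry values are illustrative only:

```typescript
import { Queue } from 'src/queue';
import { sha1 } from 'src/utils';

// Hash files with up to 4 workers, retrying each failed task up to 3 times.
const queue = new Queue<string, string>((filepath) => sha1(filepath), { concurrency: 4, retry: 3 });

for (const filepath of ['photo1.jpg', 'photo2.jpg']) {
  // push() settles when the task completes and never causes an unhandledRejection,
  // so the returned promise can safely be ignored here.
  void queue.push(filepath);
}

await queue.drained();

// Failed tasks remain in the store and can be reported after draining.
const failed = queue.tasks.filter((task) => task.status === 'failed');
console.log(`${failed.length} file(s) failed to hash`);
```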

View File

@@ -1,20 +1,14 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { CrawlOptions, crawl } from 'src/utils';
interface Test {
test: string;
options: Omit<CrawlOptions, 'extensions'>;
files: Record<string, boolean>;
skipOnWin32?: boolean;
}
const cwd = process.cwd();
const readContent = (path: string) => {
return readFileSync(path).toString();
};
const extensions = [
'.jpg',
'.jpeg',
@@ -49,18 +43,6 @@ const tests: Test[] = [
'/photos/image.jpg': true,
},
},
{
test: 'should crawl folders with quotes',
options: {
pathsToCrawl: ["/photo's/", '/photo"s/', '/photo`s/'],
},
files: {
"/photo's/image1.jpg": true,
'/photo"s/image2.jpg': true,
'/photo`s/image3.jpg': true,
},
skipOnWin32: true, // single quote interferes with mockfs root on Windows
},
{
test: 'should crawl a single file',
options: {
@@ -128,7 +110,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {
@@ -264,8 +256,7 @@ const tests: Test[] = [
{
test: 'should support ignoring absolute paths',
options: {
// Currently, fast-glob has some caveat when dealing with `/`.
pathsToCrawl: ['/*s'],
pathsToCrawl: ['/'],
recursive: true,
exclusionPattern: '/images/**',
},
@@ -283,22 +274,16 @@ describe('crawl', () => {
});
describe('crawl', () => {
for (const { test: name, options, files, skipOnWin32 } of tests) {
if (process.platform === 'win32' && skipOnWin32) {
test.skip(name);
continue;
}
it(name, async () => {
// The file content is the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));
for (const { test, options, files } of tests) {
it(test, async () => {
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, ''])));
const actual = await crawl({ ...options, extensions });
const expected = Object.entries(files)
.filter((entry) => entry[1])
.map(([file]) => file);
// Compare the file's content instead of the path since a file can be represented in multiple ways.
expect(actual.map((path) => readContent(path)).sort()).toEqual(expected.sort());
expect(actual.sort()).toEqual(expected.sort());
});
}
});

View File

@@ -1,9 +1,8 @@
import { getMyUser, init, isHttpError } from '@immich/sdk';
import { convertPathToPattern, glob } from 'fast-glob';
import { glob } from 'fast-glob';
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';
import { readFile, stat, writeFile } from 'node:fs/promises';
import { platform } from 'node:os';
import { join, resolve } from 'node:path';
import yaml from 'yaml';
@@ -107,11 +106,6 @@ export interface CrawlOptions {
exclusionPattern?: string;
extensions: string[];
}
const convertPathToPatternOnWin = (path: string) => {
return platform() === 'win32' ? convertPathToPattern(path) : path;
};
export const crawl = async (options: CrawlOptions): Promise<string[]> => {
const { extensions: extensionsWithPeriod, recursive, pathsToCrawl, exclusionPattern, includeHidden } = options;
const extensions = extensionsWithPeriod.map((extension) => extension.replace('.', ''));
@@ -130,32 +124,36 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
if (stats.isFile() || stats.isSymbolicLink()) {
crawledFiles.push(absolutePath);
} else {
patterns.push(convertPathToPatternOnWin(absolutePath));
patterns.push(absolutePath);
}
} catch (error: any) {
if (error.code === 'ENOENT') {
patterns.push(convertPathToPatternOnWin(currentPath));
patterns.push(currentPath);
} else {
throw error;
}
}
}
if (patterns.length === 0) {
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
if (recursive) {
searchPattern = searchPattern + '/**/';
}
const globbedFiles = await glob(searchPatterns, {
searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
absolute: true,
caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});
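As a rough illustration of the escaping and pattern-building above (input path hypothetical, recursive crawl assumed), a path containing a quote becomes a glob pattern with the quote wrapped in a character class:

```typescript
// Hypothetical input path; mirrors the replaceAll() escaping shown above.
const escaped = "/photo's".replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');
const pattern = `${escaped}/**/*.{jpg,png}`; // a recursive crawl appends '/**'
console.log(pattern); // => /photo[']s/**/*.{jpg,png}
```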

View File

@@ -2,7 +2,6 @@ import { defineConfig } from 'vite';
import tsconfigPaths from 'vite-tsconfig-paths';
export default defineConfig({
resolve: { alias: { src: '/src' } },
build: {
rollupOptions: {
input: 'src/index.ts',

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.50.0"
constraints = "4.50.0"
version = "4.34.0"
constraints = "4.34.0"
hashes = [
"h1:0qvD5ZKn2tMZ8cOjQrUSITIC9tKCZbrSaSswV9lOyiU=",
"h1:4N0gplrZ0zOsJv3Kx1VfIx2FwrZHbYU0Un2yfiLZIGQ=",
"h1:81AMQq4kNKU/35U8ElQegUxG4E6xB0erIjG5xVmjIyo=",
"h1:EEQNADUmV3IL6x00yzy04i7OCSLeOMgM9XQkV3w71gA=",
"h1:HD0KI7td6oiSSAnJNn8UPSGf+hKiTo4JVQYfAiU1SqM=",
"h1:Hl+o5LtcvZg2f3l1hh9vaG/DFK6k+dTIZSeM0lXyfpo=",
"h1:ZUO2oIJ6jtZdvl816h0cEIiIeZ/fFCF64+abGEVxZZM=",
"h1:Zio80fnEeUKdlSOhTVskMEFSLUQ6TMsMKnXc+Dy2P2A=",
"h1:aLLvg36evTyqjtXGV2MjAV8imktXFmry7p/xCu9GQC4=",
"h1:azL05eWyy2V8SWkbZZImPWvv8ynG4eqmrbZhjXBDFug=",
"h1:ckMysHY4fJmr7o58XMi+DdgOTB/U/Mf1u1JA9ly3g/I=",
"h1:jxOwjDNjt5WCb4YjjiMsman91O8Y+MAPz6UwJ4a6F+0=",
"h1:u4OfnjSLa4Wk1IUFAzrvMnGgr8MvRHEWVDHEScPK2E8=",
"h1:wQkR1oeSkzlHn3rnVuLJRJLBHlg4EHt7Y64DeTjfkjQ=",
"zh:0ef99ed39472a94e6a0d6fa733cf0a46bce3bf66eba2873efae8846efdddc237",
"zh:2929cbbffcead171d45c88e4a7a59e9c013ea775dafa68b10da8db7cd04b6140",
"zh:462601c87118088e1a718842e367af7d8e7620598d426980a6d6b33de759865e",
"zh:56766eb62a74a9d88d9efb8486dd3a0c5c9db873d0a980ae9ef1e8af27d74231",
"zh:6b4e8810d99498a5a20a5872982a0f1354e79cfc4a7dfe7cc656f1c7eaae47d8",
"zh:6d65bdb4ec94b6eecc8abe26d94e2ca09262dc1e7a9934db829f418be0119920",
"zh:71adeaf31e41a358ec6095004062e43f56ee7d4b2504e5613ab351d511695641",
"h1:+W0+Xe1AUh7yvHjDbgR9T7CY1UbBC3Y6U7Eo+ucLnJM=",
"h1:2+1lKObDDdFZRluvROF3RKtXD66CFT3PfnHOvR6CmfA=",
"h1:7vluN2wmw8D9nI11YwTgoGv3hGDXlkt8xqQ4L/JABeQ=",
"h1:B0Urm8ZKTJ8cXzSCtEpJ+o+LsD8MXaD6LU59qVbh50Q=",
"h1:FpGLCm5oF12FaRti3E4iQJlkVbdCC7toyGVuH8og7KY=",
"h1:FunTmrCMDy+rom7YskY0WiL5/Y164zFrrD9xnBxU5NY=",
"h1:GrxZhEb+5HzmHF/BvZBdGKBJy6Wyjme0+ABVDz/63to=",
"h1:J36dda2K42/oTfHuZ4jKkW5+nI6BTWFRUvo60P17NJg=",
"h1:Kq0Wyn+j6zoQeghMYixbnfnyP9ZSIEJbOCzMbaCiAQQ=",
"h1:TKxunXCiS/z105sN/kBNFwU6tIKD67JKJ3ZKjwzoCuI=",
"h1:TR0URKFQxsRO5/v7bKm5hkD/CTTjsG7aVGllL/Mf25c=",
"h1:V+3Qs0Reb6r+8p4XjE5ZFDWYrOIN0x5SwORz4wvHOJ4=",
"h1:mZB3Ui7V/lPQMQK53eBOjIHcrul74252dT06Kgn3J+s=",
"h1:wJwZrIXxoki8omXLJ7XA7B1KaSrtcLMJp090fRtFRAc=",
"zh:02aa46743c1585ada8faa7db23af68ea614053a506f88f05d1090ff5e0e68076",
"zh:1e1a545e83e6457a0e15357b23139bc288fb4fbd5e9a5ddfedc95a6a0216b08c",
"zh:29eef2621e0b1501f620e615bf73b1b90d5417d745e38af63634bc03250faf87",
"zh:3c20989d7e1e141882e6091384bf85fdc83f70f3d29e3e047c493a07de992095",
"zh:3d39619379ba29c7ffb15196f0ea72a04c84cfcdf4b39ac42ac4cf4c19f3eae2",
"zh:805f4a2774e9279c590b8214aabe6df9dcc22bb995df2530513f2f78c647ce75",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:89761c15908ccc2cf9c50bb5cb3be45d3ad0c45fc7c608c6b95f48c0288b7160",
"zh:8cc5d7c5939da89cfd01f3e51c84f3576564783acea9db86bd9e32049805ed96",
"zh:987cff8225b1dd436cdcb4fc6228689ae7e4281de6896412a2a9a3325c49f05e",
"zh:991e83ebb89867d71e01a1c215ed159efb425683b0a44707be8579eb0a337f06",
"zh:ab8177ae2d8f5cfa90043a6f867435012cae115f6061b832a7e2462e0ae87a67",
"zh:d1ca34df1398f201274a6a18102975148c10ca15aa43cfc56cc9897620929509",
"zh:d34946f70201baf6dda03e3b294c6bbe40d95d0278e97b9f636ded94822b24ac",
"zh:8af716f8655a57aa986861a8a7fa1d724594a284bd77c870eaea4db5f8b9732d",
"zh:a3d13c93b4e6ee6004782debaa9a17f990f2fe8ec8ba545c232818bb6064aba9",
"zh:bfa136acf82d3719473c0064446cc16d1b0303d98b06f55f503b7abeebceadb1",
"zh:ca6cf9254ae5436f2efbc01a0e3f7e4aa3c08b45182037b3eb3eb9539b2f7aec",
"zh:cba32d5de02674004e0a5955bd5222016d9991ca0553d4bd3bea517cd9def6ab",
"zh:d22c8cd527c6d0e84567f57be5911792e2fcd5969e3bba3747489f18bb16705b",
"zh:e4eeede9b3e72cdadd6cc252d4cbcf41baee6ecfd12bacd927e2dcbe733ab210",
"zh:facdaa787a69f86203cd3cc6922baea0b4a18bd9c36b0a8162e2e88ef6c90655",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.50.0"
version = "4.34.0"
}
}
}

View File

@@ -9,6 +9,6 @@ resource "cloudflare_record" "immich_app_release_domain" {
proxied = true
ttl = 1
type = "CNAME"
content = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
value = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.50.0"
constraints = "4.50.0"
version = "4.34.0"
constraints = "4.34.0"
hashes = [
"h1:0qvD5ZKn2tMZ8cOjQrUSITIC9tKCZbrSaSswV9lOyiU=",
"h1:4N0gplrZ0zOsJv3Kx1VfIx2FwrZHbYU0Un2yfiLZIGQ=",
"h1:81AMQq4kNKU/35U8ElQegUxG4E6xB0erIjG5xVmjIyo=",
"h1:EEQNADUmV3IL6x00yzy04i7OCSLeOMgM9XQkV3w71gA=",
"h1:HD0KI7td6oiSSAnJNn8UPSGf+hKiTo4JVQYfAiU1SqM=",
"h1:Hl+o5LtcvZg2f3l1hh9vaG/DFK6k+dTIZSeM0lXyfpo=",
"h1:ZUO2oIJ6jtZdvl816h0cEIiIeZ/fFCF64+abGEVxZZM=",
"h1:Zio80fnEeUKdlSOhTVskMEFSLUQ6TMsMKnXc+Dy2P2A=",
"h1:aLLvg36evTyqjtXGV2MjAV8imktXFmry7p/xCu9GQC4=",
"h1:azL05eWyy2V8SWkbZZImPWvv8ynG4eqmrbZhjXBDFug=",
"h1:ckMysHY4fJmr7o58XMi+DdgOTB/U/Mf1u1JA9ly3g/I=",
"h1:jxOwjDNjt5WCb4YjjiMsman91O8Y+MAPz6UwJ4a6F+0=",
"h1:u4OfnjSLa4Wk1IUFAzrvMnGgr8MvRHEWVDHEScPK2E8=",
"h1:wQkR1oeSkzlHn3rnVuLJRJLBHlg4EHt7Y64DeTjfkjQ=",
"zh:0ef99ed39472a94e6a0d6fa733cf0a46bce3bf66eba2873efae8846efdddc237",
"zh:2929cbbffcead171d45c88e4a7a59e9c013ea775dafa68b10da8db7cd04b6140",
"zh:462601c87118088e1a718842e367af7d8e7620598d426980a6d6b33de759865e",
"zh:56766eb62a74a9d88d9efb8486dd3a0c5c9db873d0a980ae9ef1e8af27d74231",
"zh:6b4e8810d99498a5a20a5872982a0f1354e79cfc4a7dfe7cc656f1c7eaae47d8",
"zh:6d65bdb4ec94b6eecc8abe26d94e2ca09262dc1e7a9934db829f418be0119920",
"zh:71adeaf31e41a358ec6095004062e43f56ee7d4b2504e5613ab351d511695641",
"h1:+W0+Xe1AUh7yvHjDbgR9T7CY1UbBC3Y6U7Eo+ucLnJM=",
"h1:2+1lKObDDdFZRluvROF3RKtXD66CFT3PfnHOvR6CmfA=",
"h1:7vluN2wmw8D9nI11YwTgoGv3hGDXlkt8xqQ4L/JABeQ=",
"h1:B0Urm8ZKTJ8cXzSCtEpJ+o+LsD8MXaD6LU59qVbh50Q=",
"h1:FpGLCm5oF12FaRti3E4iQJlkVbdCC7toyGVuH8og7KY=",
"h1:FunTmrCMDy+rom7YskY0WiL5/Y164zFrrD9xnBxU5NY=",
"h1:GrxZhEb+5HzmHF/BvZBdGKBJy6Wyjme0+ABVDz/63to=",
"h1:J36dda2K42/oTfHuZ4jKkW5+nI6BTWFRUvo60P17NJg=",
"h1:Kq0Wyn+j6zoQeghMYixbnfnyP9ZSIEJbOCzMbaCiAQQ=",
"h1:TKxunXCiS/z105sN/kBNFwU6tIKD67JKJ3ZKjwzoCuI=",
"h1:TR0URKFQxsRO5/v7bKm5hkD/CTTjsG7aVGllL/Mf25c=",
"h1:V+3Qs0Reb6r+8p4XjE5ZFDWYrOIN0x5SwORz4wvHOJ4=",
"h1:mZB3Ui7V/lPQMQK53eBOjIHcrul74252dT06Kgn3J+s=",
"h1:wJwZrIXxoki8omXLJ7XA7B1KaSrtcLMJp090fRtFRAc=",
"zh:02aa46743c1585ada8faa7db23af68ea614053a506f88f05d1090ff5e0e68076",
"zh:1e1a545e83e6457a0e15357b23139bc288fb4fbd5e9a5ddfedc95a6a0216b08c",
"zh:29eef2621e0b1501f620e615bf73b1b90d5417d745e38af63634bc03250faf87",
"zh:3c20989d7e1e141882e6091384bf85fdc83f70f3d29e3e047c493a07de992095",
"zh:3d39619379ba29c7ffb15196f0ea72a04c84cfcdf4b39ac42ac4cf4c19f3eae2",
"zh:805f4a2774e9279c590b8214aabe6df9dcc22bb995df2530513f2f78c647ce75",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:89761c15908ccc2cf9c50bb5cb3be45d3ad0c45fc7c608c6b95f48c0288b7160",
"zh:8cc5d7c5939da89cfd01f3e51c84f3576564783acea9db86bd9e32049805ed96",
"zh:987cff8225b1dd436cdcb4fc6228689ae7e4281de6896412a2a9a3325c49f05e",
"zh:991e83ebb89867d71e01a1c215ed159efb425683b0a44707be8579eb0a337f06",
"zh:ab8177ae2d8f5cfa90043a6f867435012cae115f6061b832a7e2462e0ae87a67",
"zh:d1ca34df1398f201274a6a18102975148c10ca15aa43cfc56cc9897620929509",
"zh:d34946f70201baf6dda03e3b294c6bbe40d95d0278e97b9f636ded94822b24ac",
"zh:8af716f8655a57aa986861a8a7fa1d724594a284bd77c870eaea4db5f8b9732d",
"zh:a3d13c93b4e6ee6004782debaa9a17f990f2fe8ec8ba545c232818bb6064aba9",
"zh:bfa136acf82d3719473c0064446cc16d1b0303d98b06f55f503b7abeebceadb1",
"zh:ca6cf9254ae5436f2efbc01a0e3f7e4aa3c08b45182037b3eb3eb9539b2f7aec",
"zh:cba32d5de02674004e0a5955bd5222016d9991ca0553d4bd3bea517cd9def6ab",
"zh:d22c8cd527c6d0e84567f57be5911792e2fcd5969e3bba3747489f18bb16705b",
"zh:e4eeede9b3e72cdadd6cc252d4cbcf41baee6ecfd12bacd927e2dcbe733ab210",
"zh:facdaa787a69f86203cd3cc6922baea0b4a18bd9c36b0a8162e2e88ef6c90655",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.50.0"
version = "4.34.0"
}
}
}

View File

@@ -9,7 +9,7 @@ resource "cloudflare_record" "immich_app_branch_subdomain" {
proxied = true
ttl = 1
type = "CNAME"
content = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
value = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}
@@ -18,7 +18,7 @@ output "immich_app_branch_subdomain" {
}
output "immich_app_branch_pages_hostname" {
value = cloudflare_record.immich_app_branch_subdomain.content
value = cloudflare_record.immich_app_branch_subdomain.value
}
output "pages_project_name" {

View File

@@ -26,52 +26,31 @@ services:
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
environment:
IMMICH_REPOSITORY: immich-app/immich
IMMICH_REPOSITORY_URL: https://github.com/immich-app/immich
IMMICH_SOURCE_REF: local
IMMICH_SOURCE_COMMIT: af2efbdbbddc27cd06142f22253ccbbbbeec1f55
IMMICH_SOURCE_URL: https://github.com/immich-app/immich/commit/af2efbdbbddc27cd06142f22253ccbbbbeec1f55
IMMICH_BUILD: '9654404849'
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
IMMICH_BUILD_IMAGE: development
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
hard: 1048576
ports:
- 3001:3001
- 9230:9230
- 9231:9231
- 2283:2283
depends_on:
- redis
- database
healthcheck:
disable: false
immich-web:
container_name: immich_web
image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build:
context: ../web
command: ['/usr/src/app/bin/immich-web']
env_file:
- .env
ports:
- 3000:3000
- 2283:3000
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
# - ../../ui:/usr/ui
- /usr/src/app/node_modules
ulimits:
nofile:
@@ -102,12 +81,10 @@ services:
depends_on:
- database
restart: unless-stopped
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
@@ -126,25 +103,13 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
# ports:

View File

@@ -16,13 +16,11 @@ services:
env_file:
- .env
ports:
- 2283:2283
- 2283:3001
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -35,19 +33,15 @@ services:
dockerfile: Dockerfile
args:
- DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports:
- 3003:3003
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -67,31 +61,19 @@ services:
ports:
- 5432:5432
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# set IMMICH_METRICS=true in .env to enable metrics
immich-prometheus:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:6559acbd5d770b15bb3c954629ce190ac3cbbdb2b7f1c30f0385c4e05104e218
image: prom/prometheus@sha256:5c435642ca4d8427ca26f4901c11114023004709037880cd7860d5b7176aa731
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -103,7 +85,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
image: grafana/grafana:11.0.0-ubuntu@sha256:02e99d1ee0b52dc9d3000c7b5314e7a07e0dfd69cc49bb3f8ce323491ed3406b
volumes:
- grafana-data:/var/lib/grafana

View File

@@ -16,19 +16,16 @@ services:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
- 2283:3001
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
@@ -43,12 +40,10 @@ services:
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
image: docker.io/redis:6.2-alpine@sha256:e31ca60b18f7e9b78b573d156702471d4eda038803c0b8e6f01559f350031e93
healthcheck:
test: redis-cli ping || exit 1
restart: always
@@ -62,26 +57,13 @@ services:
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT SUM(checksum_failures) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
volumes:

View File

@@ -12,7 +12,6 @@ DB_DATA_LOCATION=./postgres
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres
# The values below this line do not need to be changed

View File

@@ -51,4 +51,5 @@ services:
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
- LIBVA_DRIVER_NAME=d3d12

View File

@@ -3,10 +3,10 @@ global:
evaluation_interval: 15s
scrape_configs:
- job_name: immich_api
- job_name: immich_server
static_configs:
- targets: ['immich-server:8081']
- job_name: immich_microservices
static_configs:
- targets: ['immich-server:8082']
- targets: ['immich-microservices:8081']

View File

@@ -1,49 +0,0 @@
#!/bin/sh
set -eu
LOG_LEVEL="${IMMICH_LOG_LEVEL:='info'}"
logDebug() {
if [ "$LOG_LEVEL" = "debug" ] || [ "$LOG_LEVEL" = "verbose" ]; then
echo "DEBUG: $1" >&2
fi
}
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
logDebug "cgroup v2 detected."
if [ -f /sys/fs/cgroup/cpu.max ]; then
read -r quota period </sys/fs/cgroup/cpu.max
if [ "$quota" = "max" ]; then
logDebug "No CPU limits set."
unset quota period
fi
else
logDebug "/sys/fs/cgroup/cpu.max not found."
fi
else
logDebug "cgroup v1 detected."
if [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then
quota=$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)
period=$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)
if [ "$quota" = "-1" ]; then
logDebug "No CPU limits set."
unset quota period
fi
else
logDebug "/sys/fs/cgroup/cpu/cpu.cfs_quota_us or /sys/fs/cgroup/cpu/cpu.cfs_period_us not found."
fi
fi
if [ -n "${quota:-}" ] && [ -n "${period:-}" ]; then
cpus=$((quota / period))
if [ "$cpus" -eq 0 ]; then
cpus=1
fi
else
cpus=$(grep -c ^processor /proc/cpuinfo)
fi
echo "$cpus"

View File

@@ -1 +1 @@
22.13.1
20.14

View File

@@ -94,7 +94,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
Cheers!

View File

@@ -142,7 +142,7 @@ Thank you, and I am asking for your support for the project. I hope to be a full
- Bitcoin: 3QVAb9dCHutquVejeNXitPqZX26Yg5kxb7
- Give a project a star - the contributors love gazing at the stars and seeing their creations shining in the sky.
Join our friendly [Discord](https://discord.immich.app) to talk and discuss Immich, tech, or anything
Join our friendly [Discord](https://discord.gg/D8JsnBEuKb) to talk and discuss Immich, tech, or anything
Cheers!

View File

@@ -1,7 +1,7 @@
---
title: The Immich core team goes full-time
authors: [alextran]
tags: [update, announcement, FUTO]
tags: [update, announcement, futo]
date: 2024-05-01T00:00
---

View File

@@ -1,91 +0,0 @@
---
title: Licensing announcement - Purchase a license to support Immich
authors: [alextran]
tags: [update, announcement, FUTO]
date: 2024-07-18T00:00
---
Hello everybody,
Firstly, on behalf of the Immich team, I'd like to thank everybody for your continuous support of Immich since the very first day! Your contributions, encouragement, and community engagement have helped bring Immich to its current state. The team and I are forever grateful for that.
Since our [last announcement of the core team joining FUTO to work on Immich full-time](https://immich.app/blog/2024/immich-core-team-goes-fulltime), one of the goals of our new position is to foster a healthy relationship between the developers and the users. We believe that this enables us to create great software, establish transparent policies and build trust.
We want to build a great software application that brings value to you and your loved ones' lives. We are not treating you as the product, i.e., selling or tracking your data. We are not putting annoying ads into our software. We respect your privacy. We want to be compensated for the hard work we put in to build Immich for you.
With those notes, we have enabled a way for you to financially support the continued development of Immich by offering a lifetime license of the software, ensuring that it can move forward and will be maintained. We think if you like and use software, you should pay for it, but _we're never going to force anyone to pay or try to limit Immich for those who don't._
There are two types of license that you can choose to purchase: **Server License** and **Individual License**.
### Server License
This is a lifetime license costing **$99.99**. The license is applied to the whole server. You and all users that use your server are licensed.
### Individual License
This is a lifetime license costing **$24.99**. The license is applied to a single user, and can be used on any server they choose to connect to.
<img
width="837"
alt="license-social-gh"
src="https://github.com/user-attachments/assets/241932ed-ef3b-44ec-a9e2-ee80754e0cca"
/>
You can purchase the license on [our page - https://buy.immich.app](https://buy.immich.app).
Starting with release `v1.109.0`, you can purchase a license and enter the key directly in the app.
<img
width="1414"
alt="license-page-gh"
src="https://github.com/user-attachments/assets/364fc32a-f6ef-4594-9fea-28d5a26ad77c"
/>
## Thank you
Thank you again for your support; this will help create a strong foundation and stability for the Immich team to continue developing and maintaining the project that you love to use.
<p align="center">
<img
src="https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExbjY2eWc5Y2F0ZW56MmR4aWE0dDhzZXlidXRmYWZyajl1bWZidXZpcyZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/87CKDqErVfMqY/giphy.gif"
width="550"
title="SUPPORT THE PROJECT!"
/>
</p>
<br />
<br />
Cheers! 🎉
Immich team
# FAQ
### 1. Where can I purchase a license?
There are several places where you can purchase the license:
- [https://buy.immich.app](https://buy.immich.app)
- [https://pay.futo.org](https://pay.futo.org/)
- or directly from the app.
### 2. Do I need both _Individual License_ and _Server License_?
No.
If you are the admin and the sole user, or your instance has fewer than 4 users in total, you can buy the **Individual License** for each user.
If your instance has more than 4 users, it is more cost-effective to buy the **Server License**, which will license all the users on your instance.
### 3. What do I do if I don't pay?
You can continue using Immich without any restriction.
### 4. Will there be any paywalled features?
No, there will never be any paywalled features.
### 5. Where can I get support regarding payment issues?
You can email us at `billing@futo.org` with your `orderId` and your email address, or reach out on our Discord server.

View File

@@ -1,78 +0,0 @@
---
title: Immich Update - July 2024
authors: [alextran]
date: 2024-07-01T00:00
tags: [update, v1.106.0]
---
Hello everybody! Alex from Immich here and I am back with another development progress update for the project.
Summer has returned once again, and the night sky is filled with stars. Thank you for the **38,000 shining stars** you have sent to our [GitHub repo](https://github.com/immich-app/immich)! Since the last announcement, several core contributors have started full-time. Everything is going great with development: PRs get merged at a _brrrrrrr_ rate, conversation between team members is at a new high, and we have met and are working with the great engineers at FUTO. The spirit is high and we have a lot of things brewing that we think you will like.
Let's go over some of the updates we had since the last post.
### Container consolidation
Reduced the number of total containers from 5 to 4 by making the microservices thread spawn directly in the server container. Woohoo, remember when Immich had 7 containers?
### Email notifications
![smtp](https://github.com/immich-app/immich/assets/27055614/949cba85-d3f1-4cd3-b246-a6f5fb5d3ae8)
We added email notifications to the app with SMTP settings that you can configure for the following events
- A new account is created for you.
- You are added to a shared album.
- New media is added to an album.
### Versioned docs
You can now jump back into the past or take a peek at the unreleased version of the documentation by selecting the version on the website.
![version-doc](https://github.com/immich-app/immich/assets/27055614/6d22898a-5093-41ad-b416-4573d7ce6e03)
### Similarity deduplication
With more machine learning and CLIP magic, we now have similarity deduplication built into the application: it will search for closely similar images and let you decide what to do with them, i.e., keep or trash.
![similarity-deduplication](https://github.com/immich-app/immich/assets/27055614/3cac8478-fbf7-47ea-acb6-0146901dc67e)
### Permanent URL for asset on the web
The detail view for an asset now has a permanent URL so you can easily share it with your loved ones.
### Web app translations
We now have a public Weblate project which the community can use to translate the webapp to their native languages. We are planning to port the mobile app translation to this platform as well. If you would like to contribute, you can take a look [here](https://hosted.weblate.org/projects/immich/immich/). We're already close to 50% translated -- we really appreciate everyone contributing to that!
![web-translation](https://github.com/immich-app/immich/assets/27055614/363df2ed-656c-4584-bd82-0708a693c5bc)
### Read-only/Editor mode on shared album
As the owner of the album, you can choose whether a shared user can edit the album or only view its content without making any modifications.
![read-only-album](https://github.com/immich-app/immich/assets/27055614/c6f66375-b869-495a-9a86-3e87b316d109)
### Better video thumbnails
Immich now tries to find a descriptive video thumbnail instead of simply using the first frame. No more black images for thumbnails!
### Public Roadmap
We now have a [public roadmap](https://immich.app/roadmap), giving you a high-level overview of things the team is working on. The first goal of this roadmap is to bring Immich to a stable release, which is expected sometime later this year. Some of the highlights include:
- Auto stacking - Auto stacking of burst photos
- Basic editor - Basic photo editing capabilities
- Workflows - Automate tasks with workflows
- Fine grained access controls - Granular access controls for users and api keys
- Better background backups - Rework background backups to be more reliable
- Private/locked photos - Private assets with extra protections
Beyond the items in the roadmap, we have _many many_ more ideas for Immich. The team and I hope that you are enjoying the application and finding it helpful in your life; we have nothing but the intention of building out great software for you all!
Have an amazing Summer or Winter for those in the southern hemisphere! :D
Until next time,
Cheers!
Alex

View File

@@ -49,38 +49,17 @@ The behaviors differ based on your device manufacturer and operating system, but
On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below:
- Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`.
- Disable **Low Power Mode** when not needed, as this can prevent apps from running in the background.
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocations, making it more likely that Immich's task runs.
- Use the Immich app more often.
### Why are features in the mobile app not working with a self-signed certificate, Basic Auth, custom headers, or mutual TLS?
These network features are experimental. They often do not work with video playback, asset upload or download, and other features.
Many of these limitations are tracked in [#15230](https://github.com/immich-app/immich/issues/15230).
Instead of these experimental features, we recommend using the URL switching feature, a VPN, or a [free trusted SSL certificate](https://letsencrypt.org/) for your domain.
We are not actively developing these features and will not be able to provide support, but welcome contributions to improve them.
Please discuss any large PRs with our dev team to ensure your time is not wasted.
### Why isn't the mobile app updated yet?
The app stores can take a few days to approve new builds of the app. If you're impatient, android APKs can be downloaded from the GitHub releases.
---
## Assets
### Does Immich change the file?
No, Immich does not modify the original files.
All edited metadata is saved in companion `.xmp` sidecar files and the database.
However, Immich will delete original files that have been trashed when the trash is emptied in the Immich UI.
### Why do my file names appear as a random string in the file manager?
When Storage Template is off (the default), Immich stores files under randomized names (random UUIDs) to prevent duplicate file names.
To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
No, Immich does not touch the original file under any circumstances;
all edited metadata are saved in the companion sidecar file and the database.
### Can I add my existing photo library?
@@ -92,20 +71,11 @@ Template changes will only apply to _new_ assets. To retroactively apply the tem
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy in front of Immich.
Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to allow large requests.
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, please know that they set a maximum file size of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
This often happens when using a reverse proxy (such as Nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also, check the disk space of your reverse proxy. In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
### Why are some photos stored in the file system with the wrong date?
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job.
The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc.,
the job may not have run automatically the first time.
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc., the job may not have run automatically the first time.
### How can I hide photos from the timeline?
@@ -135,8 +105,7 @@ Also, there are additional jobs for person (face) thumbnails.
### Why do files from WhatsApp not appear with the correct date?
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp,
only the order of arrival on the device. [See #9116](https://github.com/immich-app/immich/discussions/9116).
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, not the order of arrival on the device. [See #3527](https://github.com/immich-app/immich/issues/3527).
### What happens if an asset exists in more than one account?
@@ -164,35 +133,40 @@ For example, say you have existing transcodes with the policy "Videos higher tha
No. Our design principle is that the original assets should always be untouched.
### How can I mount a CIFS/Samba volume within Docker?
### How can I move all data (photos, persons, albums, libraries) from one user to another?
If you aren't able to or prefer not to mount Samba on the host (such as in a Windows environment), you can mount the volume within Docker.
Below is an example in the `docker-compose.yml`.
This is not officially supported but can be accomplished with some database updates. You can do this on the command line (in the PostgreSQL container using the `psql` command), or you can add, for example, an [Adminer](https://www.adminer.org/) container to the `docker-compose.yml` file so that you can use a web interface.
Change your username, password, local IP, and share name, and see below how the line `- originals:/usr/src/app/originals`
correlates to the section where the volume `originals` is created. You can call this volume whatever you like, and map it into the Docker container as you like.
For example, you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `- Photos:/usr/src/app/photos`.
<details>
<summary>Steps</summary>
```diff
...
services:
immich-server:
...
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
+ - originals:/usr/src/app/originals
...
volumes:
model-cache:
+ originals:
+ driver_opts:
+ type: cifs
+ o: 'iocharset=utf8,username=USERNAMEHERE,password=PASSWORDHERE,rw' # change to `ro` if read only desired
+ device: '//localipaddress/sharename'
1. **MAKE A BACKUP** - See [backup and restore](/docs/administration/backup-and-restore.md).
2. Find the ID of both the 'source' and the 'destination' user (it's the id column in the `users` table)
3. Four tables need to be updated:
```sql
BEGIN;
-- reassign albums
UPDATE albums SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign people
UPDATE person SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign assets
UPDATE assets SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>'
AND CHECKSUM NOT IN (SELECT CHECKSUM FROM assets WHERE "ownerId" = '<destinationId>');
-- reassign external libraries
UPDATE libraries SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
COMMIT;
```
4. There might be left-over assets in the 'source' user's library if they were skipped by the last query because of duplicate checksums. These are probably duplicates anyway, and can usually be removed.
</details>
---
## Albums
@@ -217,30 +191,17 @@ We haven't implemented an official mechanism for creating albums from external l
Duplicate checking only exists for upload libraries, using the file hash. Furthermore, duplicate checking is not global, but _per library_. Therefore, a situation where the same file appears twice in the timeline is possible, especially for external libraries.
### Why are my edits to files not being saved in read-only external libraries?
Images in read-write external libraries (the default) can be edited as normal.
In read-only libraries (`:ro` in the `docker-compose.yml`), Immich is unable to create the `.xmp` sidecar files to store edited file metadata.
For this reason, the metadata (timestamp, location, description, star rating, etc.) cannot be edited for files in read-only external libraries.
### How are deletions of files handled in external libraries?
Immich will attempt to delete original files that have been trashed when the trash is emptied.
In read-write external libraries (the default), Immich will delete the original file.
In read-only libraries (`:ro` in the `docker-compose.yml`), files can still be trashed in the UI.
However, when the trash is emptied, the files will re-appear in the main timeline since Immich is unable to delete the original file.
---
## Machine Learning
### How does smart search work?
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
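As a rough sketch of the idea (not Immich's actual implementation, which stores embeddings in the database and queries them with the `vectors.so` extension loaded in the Postgres container), a search compares the query text's embedding against each image embedding and ranks by similarity:

```typescript
// Cosine similarity between two embedding vectors.
const cosineSimilarity = (a: number[], b: number[]): number => {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};

// Rank image embeddings against a query embedding; the highest scores are the
// most semantically similar results.
const rank = (query: number[], images: Map<string, number[]>) =>
  [...images.entries()]
    .map(([id, embedding]) => ({ id, score: cosineSimilarity(query, embedding) }))
    .sort((a, b) => b.score - a.score);
```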
### How does facial recognition work?
See [How Facial Recognition Works](/docs/features/facial-recognition#how-facial-recognition-works) for details.
For face detection and recognition, Immich uses [InsightFace models](https://github.com/deepinsight/insightface/tree/master/model_zoo).
### How can I disable machine learning?
@@ -254,15 +215,19 @@ However, disabling all jobs will not disable the machine learning service itself
### I'm getting errors about models being corrupt or failing to download. What do I do?
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Hugging Face][huggingface] and place them in the cache folder.
You can delete the model cache volume, where models are downloaded. This will give the service a clean environment to download the model again. If models are failing to download entirely, you can manually download them from [Huggingface][huggingface] and place them in the cache folder.
### Can I use a custom CLIP model?
No, this is not supported. Only models listed in the [Hugging Face][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
No, this is not supported. Only models listed in the [Huggingface][huggingface] page are compatible. Feel free to make a feature request if there's a model not listed here that you think should be added.
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/searching#clip-model) for instructions.
You can change to a multilingual model listed [here](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) by going to Administration > Machine Learning Settings > Smart Search and replacing the name of the model. Be sure to re-run Smart Search on all assets after this change. You can then search in over 100 languages.
:::note
Feel free to make a feature request if there's a model you want to use that isn't in [Immich Huggingface list][huggingface].
:::
### Does Immich support Facial Recognition for videos?
@@ -273,7 +238,7 @@ Scanning the entire video for faces may be implemented in the future.
No.
:::tip
You can use [Smart Search](/docs/features/searching.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
You can use [Smart Search](/docs/features/smart-search.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
:::
### I'm getting a lot of "faces" that aren't faces, what can I do?
@@ -303,7 +268,7 @@ ls clip/ facial-recognition/
### Why is Immich slow on low-memory systems like the Raspberry Pi?
Immich optionally uses transcoding and machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this or host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
### Can I lower CPU and RAM usage?
@@ -312,12 +277,10 @@ The initial backup is the most intensive due to the number of jobs running. The
- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
  - For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
  - It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [above](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage?
By default, a container has no resource constraints and can use as much of a given resource as the host's kernel scheduler allows. To limit this, you can add the following to the `docker-compose.yml` block of any containers that you want to have limited resources.
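As a rough sketch, such a block can look like the following; the `cpus` and `memory` values here are illustrative and should be tuned to your hardware:

```yaml
services:
  immich-machine-learning:
    deploy:
      resources:
        limits:
          cpus: '2.0' # illustrative value
          memory: 1G # illustrative value
```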
@@ -337,8 +300,6 @@ deploy:
</details>
For more details, you can look at the [original docker docs](https://docs.docker.com/config/containers/resource_constraints/) or use this [guide](https://www.baeldung.com/ops/docker-memory-limit).
Note that memory constraints work by terminating the container, so this can introduce instability if set too low.
### How can I boost machine learning speed?
:::note
@@ -348,16 +309,21 @@ This advice improves throughput, not latency. This is to say that it will make S
You can increase throughput by increasing the job concurrency for machine learning jobs (Smart Search, Face Detection). With higher concurrency, the host will work on more assets in parallel. You can do this by navigating to Administration > Settings > Job Settings and increasing concurrency as needed.
:::danger
On a normal machine, 2 or 3 concurrent jobs can probably max the CPU. Storage speed and latency can quickly become the limiting factor beyond this, particularly when using HDDs.
The concurrency can be increased more comfortably with a GPU, but should still not be above 16 in most cases.
Do not set the job concurrency too high, or you will probably overload the server.
More details can be found [here](https://discord.com/channels/979116623879368755/994044917355663450/1174711719994605708).
:::
### Why is Immich using so much of my CPU?

When a large number of assets are uploaded to Immich, it is expected that the CPU and RAM will be heavily used for machine learning work and thumbnail generation. Once this process completes, CPU usage will drop to around 3-5%.

### My server shows Server Status Offline | Version Unknown. What can I do?

You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your reverse proxy.
---
@@ -367,12 +333,6 @@ You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your rev
Immich components are typically deployed using Docker. To see logs for deployed Docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
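For example, here are a couple of common invocations; the container names are the compose defaults and may differ in your deployment:

```bash
# Tail and follow the main server logs
docker logs --follow --tail 100 immich_server

# Print the machine learning container's logs once
docker logs immich_machine_learning
```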
### How can I reduce the log verbosity of Redis?
To decrease Redis logs, you can add the following line to the `redis:` section of the `docker-compose.yml`:
`command: redis-server --loglevel warning`
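A minimal sketch of where the line goes; the image tag shown is illustrative, so keep whatever your compose file already uses:

```yaml
services:
  redis:
    image: redis:6.2-alpine # illustrative tag; keep your existing one
    command: redis-server --loglevel warning
```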
### How can I run Immich as a non-root user?
You can change the user in the container by setting the `user` argument in `docker-compose.yml` for each service.
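For example, a minimal sketch using UID/GID `1000:1000` (adjust to the user you want the containers to run as):

```yaml
services:
  immich-server:
    user: '1000:1000'
  immich-machine-learning:
    user: '1000:1000'
```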
@@ -382,13 +342,9 @@ You may need to add mount points or docker volumes for the following internal co
- `immich-machine-learning:/.cache`
- `redis:/data`
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.
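For example, assuming UID/GID `1000:1000` and an upload location of `/mnt/immich` (both illustrative):

```bash
# Give the non-root user ownership of the upload location
sudo chown -R 1000:1000 /mnt/immich
```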
:::note Docker Compose Volumes
The Docker Compose top-level volume element does not support non-root access; all of the above volumes must be local volume mounts.
:::
For a further hardened system, you can add the following block to every container.
<details>
<summary>docker-compose.yml</summary>
@@ -437,28 +393,29 @@ If the error says the worker is exiting, then this is normal. This is a feature
There are a few reasons why this can happen.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory.
Consider either increasing the server's RAM or moving the service to a server with more RAM.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible with Immich.
## Database
### Why am I getting database ownership errors?
If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem.
NTFS, exFAT, and FAT32 filesystems are not supported. See [here](/docs/install/requirements#special-requirements-for-windows-users) for more details.
### How can I verify the integrity of my database?
Database checksums are enabled by default for new installations since v1.104.0. You can check if they are enabled by running the following command.
A result of `on` means that checksums are enabled.
<details>
<summary>Check if checksums are enabled</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="show data_checksums"
data_checksums
----------------
on
@@ -467,13 +424,13 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details>
If checksums are enabled, you can check the status of the database with the following command. A normal result is all `0`s.
<details>
<summary>Check for database corruption</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
datname | checksum_failures | checksum_last_failure
-----------+-------------------+-----------------------
postgres | 0 |
@@ -485,29 +442,4 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details>
You can also scan the Postgres database file structure for errors:
<details>
<summary>Scan for file structure errors</summary>
```bash
docker exec -it immich_postgres pg_amcheck --username=postgres --heapallindexed --parent-check --rootdescend --progress --all --install-missing
```
A normal result will end something like this and return with an exit code of `0`:
```bash
7470/8832 relations (84%), 730829/734735 pages (99%)
8425/8832 relations (95%), 734367/734735 pages (99%)
8832/8832 relations (100%), 734735/734735 pages (100%)
```
</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup taken before the corruption was detected.
The damaged database dump can be used to manually recover any changes made since the last backup, if needed.
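One way to do this is to apply the flag only for the dump session via `PGOPTIONS`. This is a sketch assuming the default container name and superuser credentials; note that the contents of damaged pages are irrecoverably zeroed:

```bash
# Dump while tolerating damaged pages (data in those pages is lost)
docker exec -t -e PGOPTIONS='-c zero_damaged_pages=on' immich_postgres \
  pg_dumpall --clean --if-exists --username=postgres | gzip > "/path/to/backup/damaged-dump.sql.gz"
```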
The causes of possible corruption are many, but can include unexpected poweroffs or unmounts, use of a network share for Postgres data, or a poor storage medium such as an SD card or a failing HDD/SSD.
[huggingface]: https://huggingface.co/immich-app
@@ -5,10 +5,6 @@ import TabItem from '@theme/TabItem';
A [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) is recommended to protect your data. You should keep copies of your uploaded photos/videos as well as the Immich database for a comprehensive backup solution. This page provides an overview of how to back up the database and the location of user-uploaded pictures and videos. A template bash script that can be run as a cron job is provided [here](/docs/guides/template-backup-script.md).
:::danger
The instructions on this page show you how to prepare your Immich instance to be backed up, and which files to take a backup of. You still need to use an actual backup tool to create and manage the backups yourself.
:::
## Database
:::caution
@@ -19,24 +15,12 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
:::
The recommended way to back up and restore the Immich database is to use the `pg_dumpall` command. When restoring, you need to delete the `DB_DATA_LOCATION` folder (if it exists) to reset the database.
:::caution
It is not recommended to back up the `DB_DATA_LOCATION` folder directly. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups
For convenience, Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
As mentioned above, you should make your own backup of these together with the asset folders as noted below.
You can adjust the schedule and the number of retained backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Restoring
We hope to make restoring simpler in future versions. For now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host, then follow the steps in the following section to restore the database.
### Manual Backup and Restore
<Tabs>
<TabItem value="Linux system" label="Linux system" default>
@@ -45,64 +29,92 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
```
```bash title='Restore'
docker compose down -v               # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION           # CAUTION! Deletes all Immich data to start from scratch
docker compose pull                  # Update to latest version of Immich (if desired)
docker compose create                # Create Docker containers for Immich apps without running them
docker start immich_postgres         # Start Postgres server
sleep 10                             # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME>   # Restore Backup
docker compose up -d                 # Start remainder of Immich apps
```
</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres))
```
```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: `- 'C:\path\to\backup\dump.sql:/dump.sql'`) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip`
cat < "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
</Tabs>
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database.
:::tip
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored.
:::
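For example, with the default compose setup (which loads `.env` into the server container), a minimal sketch:

```bash
# Before restoring: prevent the server from running migrations
echo "DB_SKIP_MIGRATIONS=true" >> .env

# After the restore completes: remove the variable and restart
sed -i '/DB_SKIP_MIGRATIONS/d' .env
docker compose up -d
```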
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
services:
  ...
  backup:
    container_name: immich_db_dumper
    image: prodrigestivill/postgres-backup-local:14
    env_file:
      - .env
    environment:
      POSTGRES_HOST: database
      POSTGRES_CLUSTER: 'TRUE'
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      SCHEDULE: "@daily"
      POSTGRES_EXTRA_OPTS: '--clean --if-exists'
      BACKUP_DIR: /db_dumps
    volumes:
      - ./db_dumps:/db_dumps
    depends_on:
      - database
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
```
:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.
Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
:::
## Filesystem
Immich stores two types of content in the filesystem: (a) original, unmodified assets (photos and videos), and (b) generated content. We recommend backing up the entire contents of `UPLOAD_LOCATION`, but only the original content is critical, which is stored in the following folders:
1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile`
If you choose to back up only those folders, you will need to rerun the transcoding and thumbnail generation jobs for all assets after you restore from a backup.
:::caution
If you moved any of these folders onto a different storage device (such as `profile/`), make sure to adjust the backup path to match your setup.
:::
@@ -136,21 +148,9 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
  - Preview images (small thumbnails and large previews) for each asset and thumbnails for recognized faces.
  - Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
  - Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
  - Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
- **Postgres**
  - The Immich database containing all the information to allow the system to function properly.
    **Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
  - Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
<TabItem value="Storage Template On" label="Storage Template On">
@@ -178,7 +178,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
  - Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:**
  - Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
  - Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
  - Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
  - Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
@@ -186,22 +186,11 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
  - Files uploaded through mobile apps.
  - Temporarily located in `UPLOAD_LOCATION/upload/<userID>`.
  - Transferred to `UPLOAD_LOCATION/library/<userID>` upon successful upload.
- **Postgres**
  - The Immich database containing all the information to allow the system to function properly.
    **Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
  - Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!
Follow the instructions listed [here](/docs/administration/backup-and-restore#database) to learn how to perform a proper backup.
:::
</TabItem>
</Tabs>
:::danger
Do not touch the files inside these folders under any circumstances except taking a backup. Changing or removing an asset can cause untracked and missing files.
You can think of it as the App-Which-Must-Not-Be-Named; the only way to view, change, or delete assets is through the mobile app or the web interface.
:::
@@ -1,27 +0,0 @@
# Email Notifications
Immich supports the option to send notifications via Email for the following events:
- Creating a new user
- Notifying a user when they get added to a shared album
- Informing other users about the addition of new assets to a shared album
## SMTP settings
You can access the settings panel from the web at `Administration -> Settings -> Notification settings`.
Under Email, enter the required details to connect with an SMTP server.
You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
## User notification settings
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.webp').default} width="80%" title="User notification settings" />
## Notification templates
You can override the default notification text with custom templates in HTML format. Templates support tags that are replaced with dynamic content when the notification is generated.
<img src={require('./img/user-notifications-templates.webp').default} width="80%" title="User notification templates" />