Compare commits

1 commit

Author: bo0tzz · SHA1: c24830acbf · Message: fix: do not cancel running docs builds · Date: 2024-10-02 10:24:20 +02:00

1979 changed files with 89422 additions and 129653 deletions


@@ -1,2 +0,0 @@
.env
library


@@ -1,16 +0,0 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:2ef23730ec68d8511ec8e6e0b82550ca728b256805d81f60ed890f3bfb21cfb9
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.29.1"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
&& curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
&& tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
&& rm flutter.tar.xz \
&& chown -R 1000:1000 ${FLUTTER_HOME}
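
The base image above is pinned to an immutable sha256 digest in addition to the 22 tag, so devcontainer builds stay reproducible even if the tag is later re-pointed. To refresh such a pin, the digest currently behind a tag can be resolved with standard Buildx tooling; a minimal sketch:

# print the manifest (including its sha256 digest) currently behind the tag
docker buildx imagetools inspect mcr.microsoft.com/devcontainers/typescript-node:22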


@@ -1,26 +0,0 @@
{
"name": "Immich",
"service": "immich-devcontainer",
"dockerComposeFile": [
"docker-compose.yml",
"../docker/docker-compose.dev.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dbaeumer.vscode-eslint",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
"onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
"overrideCommand": true,
"workspaceFolder": "/immich",
"remoteUser": "node"
}


@@ -1,8 +0,0 @@
services:
immich-devcontainer:
build:
dockerfile: Dockerfile
extra_hosts:
- 'host.docker.internal:host-gateway'
volumes:
- ..:/immich:cached


@@ -1,6 +0,0 @@
#!/bin/bash
# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
cp docker/example.env .devcontainer/.env
fi


@@ -1,25 +0,0 @@
#!/bin/bash
# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
sudo dpkg --add-architecture amd64 && \
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
qemu-user-static \
libc6:amd64 \
libstdc++6:amd64 \
libgcc1:amd64
fi
# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list
sudo apt-get update
sudo apt-get install dcm
dart --disable-analytics
# Install immich
cd /immich || exit
make install-all
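
The multiarch block at the top of this script exists because DCM only ships amd64 packages (note arch=amd64 in the apt source it adds), so an arm64 devcontainer needs qemu-user-static plus the amd64 C runtime to execute them. A quick sanity check after the script runs, assuming the packages installed cleanly:

# should list the x86-64 dynamic loader, confirming amd64 binaries can be resolved
dpkg -L libc6:amd64 | grep ld-linux-x86-64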

.github/.nvmrc

@@ -1 +0,0 @@
22.14.0


@@ -1,5 +1,5 @@
-title: '[Feature] feature-name-goes-here'
-labels: ['feature']
+title: "[Feature] feature-name-goes-here"
+labels: ["feature"]
 body:
 - type: markdown
@@ -11,9 +11,9 @@ body:
 - type: checkboxes
 attributes:
-label: I have searched the existing feature requests, both open and closed, to make sure this is not a duplicate request.
+label: I have searched the existing feature requests to make sure this is not a duplicate request.
 options:
-- label: 'Yes'
+- label: "Yes"
 required: true
 - type: textarea

.github/FUNDING.yml

@@ -1 +1 @@
-custom: ['https://buy.immich.app', 'https://immich.store']
+custom: ['https://buy.immich.app']


@@ -1,13 +1,6 @@
 name: Report an issue with Immich
 description: Report an issue with Immich
 body:
-- type: checkboxes
-attributes:
-label: I have searched the existing issues, both open and closed, to make sure this is not a duplicate report.
-options:
-- label: 'Yes'
-required: true
 - type: markdown
 attributes:
 value: |
@@ -84,7 +77,7 @@ body:
 id: repro
 attributes:
 label: Reproduction steps
-description: 'How do you trigger this bug? Please walk us through it step by step.'
+description: "How do you trigger this bug? Please walk us through it step by step."
 value: |
 1.
 2.
@@ -97,13 +90,12 @@ body:
 id: logs
 attributes:
 label: Relevant log output
-description:
-Please copy and paste any relevant logs below. (code formatting is
+description: Please copy and paste any relevant logs below. (code formatting is
 enabled, no need for backticks)
 render: shell
 validations:
 required: false
 - type: textarea
 attributes:
 label: Additional information


@@ -1 +1,2 @@
+blank_issues_enabled: false
 blank_pull_request_template_enabled: false


@@ -0,0 +1,22 @@
## Description
<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
Fixes # (issue)
## How Has This Been Tested?
<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->
- [ ] Test A
- [ ] Test B
## Screenshots (if appropriate):
## Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable

.github/dependabot.yml

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/package-lock.json

@@ -1,28 +0,0 @@
{
"name": ".github",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"devDependencies": {
"prettier": "^3.5.3"
}
},
"node_modules/prettier": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.5.3.tgz",
"integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==",
"dev": true,
"license": "MIT",
"bin": {
"prettier": "bin/prettier.cjs"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
}
}
}


@@ -1,9 +0,0 @@
{
"scripts": {
"format": "prettier --check .",
"format:fix": "prettier --write ."
},
"devDependencies": {
"prettier": "^3.5.3"
}
}


@@ -1,36 +0,0 @@
## Description
<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
Fixes # (issue)
## How Has This Been Tested?
<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->
- [ ] Test A
- [ ] Test B
<details><summary><h2>Screenshots (if appropriate)</h2></summary>
<!-- Images go below this line. -->
</details>
<!-- API endpoint changes (if relevant)
## API Changes
The `/api/something` endpoint is now `/api/something-else`
-->
## Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable
- [ ] I have no unrelated changes in the PR.
- [ ] I have confirmed that any new dependencies are strictly necessary.
- [ ] I have written tests for new code (if applicable)
- [ ] I have followed naming conventions/patterns in the surrounding code
- [ ] All code in `src/services/` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services/`)

.github/release.yml

@@ -1,33 +1,29 @@
 changelog:
 categories:
 - title: 🚨 Breaking Changes
 labels:
 - changelog:breaking-change
-- title: 🫥 Deprecated Changes
-labels:
-- changelog:deprecated
 - title: 🔒 Security
 labels:
 - changelog:security
 - title: 🚀 Features
 labels:
 - changelog:feature
 - title: 🌟 Enhancements
 labels:
 - changelog:enhancement
 - title: 🐛 Bug fixes
 labels:
 - changelog:bugfix
 - title: 📚 Documentation
 labels:
 - changelog:documentation
 - title: 🌐 Translations
 labels:
 - changelog:translation


@@ -22,18 +22,16 @@ jobs:
 should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - id: found_paths
-uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
+uses: dorny/paths-filter@v3
 with:
 filters: |
 mobile:
 - 'mobile/**'
-workflow:
-- '.github/workflows/build-mobile.yml'
 - name: Check if we should force jobs to run
 id: should_force
-run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
+run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
 build-sign-android:
 name: Build and sign Android
@@ -51,18 +49,18 @@ jobs:
 ref="${input_ref:-$github_ref}"
 echo "ref=$ref" >> $GITHUB_OUTPUT
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+- uses: actions/checkout@v4
 with:
 ref: ${{ steps.get-ref.outputs.ref }}
-- uses: actions/setup-java@3a4f6e1af504cf6a31855fa899c6aa5355ba6c12 # v4
+- uses: actions/setup-java@v4
 with:
 distribution: 'zulu'
 java-version: '17'
 cache: 'gradle'
 - name: Setup Flutter SDK
-uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2
+uses: subosito/flutter-action@v2
 with:
 channel: 'stable'
 flutter-version-file: ./mobile/pubspec.yaml
@@ -89,7 +87,7 @@ jobs:
 flutter build apk --release --split-per-abi --target-platform android-arm,android-arm64,android-x64
 - name: Publish Android Artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
+uses: actions/upload-artifact@v4
 with:
 name: release-apk-signed
 path: mobile/build/app/outputs/flutter-apk/*.apk
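
A pattern worth noting across these workflow diffs: one side pins every third-party action to a full commit SHA with the human-readable tag kept as a trailing comment, while the other side uses the mutable tag alone. The SHA pin is the tamper-resistant form, since a tag like v4 can be re-pointed after the fact. The convention, sketched with the checkout pin from this file:

- name: Checkout code
  # the 40-character SHA is the immutable reference; the comment records the tag it matched
  uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4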


@@ -14,7 +14,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Check out code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - name: Cleanup
 run: |


@@ -6,6 +6,7 @@ on:
 - 'cli/**'
 - '.github/workflows/cli.yml'
 pull_request:
+branches: [main]
 paths:
 - 'cli/**'
 - '.github/workflows/cli.yml'
@@ -28,9 +29,9 @@ jobs:
 working-directory: ./cli
 steps:
-- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+- uses: actions/checkout@v4
 # Setup .npmrc file to publish to npm
-- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
+- uses: actions/setup-node@v4
 with:
 node-version-file: './cli/.nvmrc'
 registry-url: 'https://registry.npmjs.org'
@@ -52,16 +53,16 @@ jobs:
 steps:
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - name: Set up QEMU
-uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+uses: docker/setup-qemu-action@v3.2.0
 - name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+uses: docker/setup-buildx-action@v3.6.1
 - name: Login to GitHub Container Registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
+uses: docker/login-action@v3
 if: ${{ !github.event.pull_request.head.repo.fork }}
 with:
 registry: ghcr.io
@@ -76,7 +77,7 @@ jobs:
 - name: Generate docker image tags
 id: metadata
-uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
+uses: docker/metadata-action@v5
 with:
 flavor: |
 latest=false
@@ -87,7 +88,7 @@ jobs:
 type=raw,value=latest,enable=${{ github.event_name == 'release' }}
 - name: Build and push image
-uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
+uses: docker/build-push-action@v6.9.0
 with:
 file: cli/Dockerfile
 platforms: linux/amd64,linux/arm64
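
In the tagging step above, docker/metadata-action derives image tags from the trigger context: flavor latest=false suppresses the implicit latest tag, and individual tags: entries can be gated per event with enable=. A reduced sketch of just the release gating used here:

with:
  flavor: |
    latest=false
  tags: |
    # only tag the image as `latest` when the workflow was triggered by a release
    type=raw,value=latest,enable=${{ github.event_name == 'release' }}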


@@ -9,14 +9,14 @@
 # the `language` matrix defined below to confirm you have the correct set of
 # supported CodeQL languages.
 #
-name: 'CodeQL'
+name: "CodeQL"
 on:
 push:
-branches: ['main']
+branches: [ "main" ]
 pull_request:
 # The branches below must be a subset of the branches above
-branches: ['main']
+branches: [ "main" ]
 schedule:
 - cron: '20 13 * * 1'
@@ -36,42 +36,43 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-language: ['javascript', 'python']
+language: [ 'javascript', 'python' ]
 # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
 # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
 steps:
 - name: Checkout repository
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 # Initializes the CodeQL tools for scanning.
 - name: Initialize CodeQL
-uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+uses: github/codeql-action/init@v3
 with:
 languages: ${{ matrix.language }}
 # If you wish to specify custom queries, you can do so here or in a config file.
 # By default, queries listed here will override any specified in a config file.
 # Prefix the list here with "+" to use these queries and those in the config file.
 # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
 # queries: security-extended,security-and-quality
 # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
 # If this step fails, then you should remove it and run the build manually (see below)
 - name: Autobuild
-uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+uses: github/codeql-action/autobuild@v3
 # Command-line programs to run using the OS shell.
 # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
 # If the Autobuild fails above, remove it and uncomment the following three lines.
 # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
 # - run: |
 # echo "Run, Build Application using script"
 # ./location_of_script_within_repo/buildscript.sh
 - name: Perform CodeQL Analysis
-uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+uses: github/codeql-action/analyze@v3
 with:
-category: '/language:${{matrix.language}}'
+category: "/language:${{matrix.language}}"

.github/workflows/docker-cleanup.yml

@@ -0,0 +1,73 @@
# This workflow runs on certain conditions to check for and potentially
# delete container images from the GHCR which no longer have an associated
# code branch.
# Requires a PAT with the correct scope set in the secrets.
#
# This workflow will not trigger runs on forked repos.
name: Docker Cleanup
on:
pull_request:
types:
- "closed"
push:
paths:
- ".github/workflows/docker-cleanup.yml"
concurrency:
group: registry-tags-cleanup
cancel-in-progress: false
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
is_org: "true"
do_delete: "true"
package_name: "${{ matrix.primary-name }}"
scheme: "pull_request"
repo_name: "immich"
match_regex: '^pr-(\d+)$|^(\d+)$'
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
needs:
- cleanup-images
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
- primary-name: "immich-build-cache"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.8.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
do_delete: "true"
is_org: "true"
package_name: "${{ matrix.primary-name }}"
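
Both cleanup jobs guard on if: ${{ env.TOKEN != '' }}: a secret that is not configured expands to an empty string, so on forks without PACKAGE_DELETE_TOKEN the steps are skipped rather than failing. The guard pattern in isolation, as a sketch:

env:
  TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
  - name: Guarded step
    # skipped entirely when the secret is absent, e.g. on forks
    if: ${{ env.TOKEN != '' }}
    run: echo "token is configured"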


@@ -5,6 +5,7 @@ on:
 push:
 branches: [main]
 pull_request:
+branches: [main]
 release:
 types: [published]
@@ -23,24 +24,21 @@ jobs:
 should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - id: found_paths
-uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
+uses: dorny/paths-filter@v3
 with:
 filters: |
 server:
 - 'server/**'
 - 'openapi/**'
 - 'web/**'
-- 'i18n/**'
 machine-learning:
 - 'machine-learning/**'
-workflow:
-- '.github/workflows/docker.yml'
 - name: Check if we should force jobs to run
 id: should_force
-run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
+run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
 retag_ml:
 name: Re-Tag ML
@@ -49,23 +47,21 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-suffix: ['', '-cuda', '-rocm', '-openvino', '-armnn', '-rknn']
+suffix: ["", "-cuda", "-openvino", "-armnn"]
 steps:
 - name: Login to GitHub Container Registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
+uses: docker/login-action@v3
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}
 password: ${{ secrets.GITHUB_TOKEN }}
 - name: Re-tag image
 run: |
 REGISTRY_NAME="ghcr.io"
 REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
 TAG_OLD=main${{ matrix.suffix }}
-TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
-TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
-docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
-docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
+TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
+docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
 retag_server:
 name: Re-Tag Server
@@ -74,10 +70,10 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-suffix: ['']
+suffix: [""]
 steps:
 - name: Login to GitHub Container Registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
+uses: docker/login-action@v3
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}
@@ -87,110 +83,107 @@ jobs:
 REGISTRY_NAME="ghcr.io"
 REPOSITORY=${{ github.repository_owner }}/immich-server
 TAG_OLD=main${{ matrix.suffix }}
-TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
-TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
-docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
-docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
+TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
+docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
 build_and_push_ml:
 name: Build and Push ML
 needs: pre-job
 if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
-runs-on: ${{ matrix.runner }}
+runs-on: ubuntu-latest
 env:
 image: immich-machine-learning
 context: machine-learning
 file: machine-learning/Dockerfile
-GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
 strategy:
 # Prevent a failure in one image from stopping the other builds
 fail-fast: false
 matrix:
 include:
-- platform: linux/amd64
-runner: ubuntu-latest
-device: cpu
-- platform: linux/arm64
-runner: ubuntu-24.04-arm
-device: cpu
-- platform: linux/amd64
-runner: ubuntu-latest
-device: cuda
-suffix: -cuda
-- platform: linux/amd64
-runner: mich
-device: rocm
-suffix: -rocm
-- platform: linux/amd64
-runner: ubuntu-latest
-device: openvino
-suffix: -openvino
-- platform: linux/arm64
-runner: ubuntu-24.04-arm
-device: armnn
-suffix: -armnn
-- platform: linux/arm64
-runner: ubuntu-24.04-arm
-device: rknn
-suffix: -rknn
+- platforms: linux/amd64,linux/arm64
+device: cpu
+- platforms: linux/amd64
+device: cuda
+suffix: -cuda
+- platforms: linux/amd64
+device: openvino
+suffix: -openvino
+- platforms: linux/arm64
+device: armnn
+suffix: -armnn
 steps:
-- name: Prepare
-run: |
-platform=${{ matrix.platform }}
-echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
+- name: Set up QEMU
+uses: docker/setup-qemu-action@v3.2.0
 - name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+uses: docker/setup-buildx-action@v3.6.1
+- name: Login to Docker Hub
+# Only push to Docker Hub when making a release
+if: ${{ github.event_name == 'release' }}
+uses: docker/login-action@v3
+with:
+username: ${{ secrets.DOCKERHUB_USERNAME }}
+password: ${{ secrets.DOCKERHUB_TOKEN }}
 - name: Login to GitHub Container Registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-# Skip when PR from a fork
+uses: docker/login-action@v3
 if: ${{ !github.event.pull_request.head.repo.fork }}
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}
 password: ${{ secrets.GITHUB_TOKEN }}
-- name: Generate cache key suffix
-run: |
-if [[ "${{ github.event_name }}" == "pull_request" ]]; then
-echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
-else
-echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
-fi
+- name: Generate docker image tags
+id: metadata
+uses: docker/metadata-action@v5
+with:
+flavor: |
+# Disable latest tag
+latest=false
+images: |
+name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
+name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
+tags: |
+# Tag with branch name
+type=ref,event=branch,suffix=${{ matrix.suffix }}
+# Tag with pr-number
+type=ref,event=pr,suffix=${{ matrix.suffix }}
+# Tag with git tag on release
+type=ref,event=tag,suffix=${{ matrix.suffix }}
+type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
-- name: Generate cache target
+- name: Determine build cache output
 id: cache-target
 run: |
-if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
-# Essentially just ignore the cache output (forks can't write to registry cache)
+if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+# Essentially just ignore the cache output (PR can't write to registry cache)
 echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
 else
-echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
+echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
 fi
 - name: Build and push image
-id: build
-uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
+uses: docker/build-push-action@v6.9.0
 with:
 context: ${{ env.context }}
 file: ${{ env.file }}
 platforms: ${{ matrix.platforms }}
-labels: ${{ steps.metadata.outputs.labels }}
+# Skip pushing when PR from a fork
+push: ${{ !github.event.pull_request.head.repo.fork }}
+cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
 cache-to: ${{ steps.cache-target.outputs.cache-to }}
-cache-from: |
-type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }}
-type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-main
-outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
+tags: ${{ steps.metadata.outputs.tags }}
+labels: ${{ steps.metadata.outputs.labels }}
 build-args: |
 DEVICE=${{ matrix.device }}
 BUILD_ID=${{ github.run_id }}
@@ -198,255 +191,100 @@ jobs:
 BUILD_SOURCE_REF=${{ github.ref_name }}
 BUILD_SOURCE_COMMIT=${{ github.sha }}
-- name: Export digest
-run: |
-mkdir -p ${{ runner.temp }}/digests
-digest="${{ steps.build.outputs.digest }}"
-touch "${{ runner.temp }}/digests/${digest#sha256:}"
-- name: Upload digest
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
-with:
-name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
-path: ${{ runner.temp }}/digests/*
-if-no-files-found: error
-retention-days: 1
-merge_ml:
-name: Merge & Push ML
-runs-on: ubuntu-latest
-if: ${{ needs.pre-job.outputs.should_run_ml == 'true' && !github.event.pull_request.head.repo.fork }}
-env:
-GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
-DOCKER_REPO: altran1502/immich-machine-learning
-strategy:
-matrix:
-include:
-- device: cpu
-- device: cuda
-suffix: -cuda
-- device: rocm
-suffix: -rocm
-- device: openvino
-suffix: -openvino
-- device: armnn
-suffix: -armnn
-- device: rknn
-suffix: -rknn
-needs:
-- build_and_push_ml
-steps:
-- name: Download digests
-uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
-with:
-path: ${{ runner.temp }}/digests
-pattern: ml-digests-${{ matrix.device }}-*
-merge-multiple: true
-- name: Login to Docker Hub
-if: ${{ github.event_name == 'release' }}
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-with:
-username: ${{ secrets.DOCKERHUB_USERNAME }}
-password: ${{ secrets.DOCKERHUB_TOKEN }}
-- name: Login to GHCR
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-with:
-registry: ghcr.io
-username: ${{ github.repository_owner }}
-password: ${{ secrets.GITHUB_TOKEN }}
-- name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
-- name: Generate docker image tags
-id: meta
-uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
-env:
-DOCKER_METADATA_PR_HEAD_SHA: 'true'
-with:
-flavor: |
-# Disable latest tag
-latest=false
-suffix=${{ matrix.suffix }}
-images: |
-name=${{ env.GHCR_REPO }}
-name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
-tags: |
-# Tag with branch name
-type=ref,event=branch
-# Tag with pr-number
-type=ref,event=pr
-# Tag with long commit sha hash
-type=sha,format=long,prefix=commit-
-# Tag with git tag on release
-type=ref,event=tag
-type=raw,value=release,enable=${{ github.event_name == 'release' }}
-- name: Create manifest list and push
-working-directory: ${{ runner.temp }}/digests
-run: |
-docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
-$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
 build_and_push_server:
 name: Build and Push Server
-runs-on: ${{ matrix.runner }}
+runs-on: ubuntu-latest
 needs: pre-job
 if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
 env:
 image: immich-server
 context: .
 file: server/Dockerfile
-GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
 strategy:
 fail-fast: false
 matrix:
 include:
-- platform: linux/amd64
-runner: ubuntu-latest
-- platform: linux/arm64
-runner: ubuntu-24.04-arm
+- platforms: linux/amd64,linux/arm64
+device: cpu
 steps:
-- name: Prepare
-run: |
-platform=${{ matrix.platform }}
-echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
+- name: Set up QEMU
+uses: docker/setup-qemu-action@v3.2.0
 - name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
+uses: docker/setup-buildx-action@v3.6.1
+- name: Login to Docker Hub
+# Only push to Docker Hub when making a release
+if: ${{ github.event_name == 'release' }}
+uses: docker/login-action@v3
+with:
+username: ${{ secrets.DOCKERHUB_USERNAME }}
+password: ${{ secrets.DOCKERHUB_TOKEN }}
 - name: Login to GitHub Container Registry
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-# Skip when PR from a fork
+uses: docker/login-action@v3
 if: ${{ !github.event.pull_request.head.repo.fork }}
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}
 password: ${{ secrets.GITHUB_TOKEN }}
-- name: Generate cache key suffix
-run: |
-if [[ "${{ github.event_name }}" == "pull_request" ]]; then
-echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
-else
-echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
-fi
+- name: Generate docker image tags
+id: metadata
+uses: docker/metadata-action@v5
+with:
+flavor: |
+# Disable latest tag
+latest=false
+images: |
+name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
+name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
+tags: |
+# Tag with branch name
+type=ref,event=branch,suffix=${{ matrix.suffix }}
+# Tag with pr-number
+type=ref,event=pr,suffix=${{ matrix.suffix }}
+# Tag with git tag on release
+type=ref,event=tag,suffix=${{ matrix.suffix }}
+type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
-- name: Generate cache target
+- name: Determine build cache output
 id: cache-target
 run: |
-if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
-# Essentially just ignore the cache output (forks can't write to registry cache)
+if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+# Essentially just ignore the cache output (PR can't write to registry cache)
 echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
 else
-echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
+echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
 fi
 - name: Build and push image
-id: build
-uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
+uses: docker/build-push-action@v6.9.0
 with:
 context: ${{ env.context }}
 file: ${{ env.file }}
-platforms: ${{ matrix.platform }}
-labels: ${{ steps.metadata.outputs.labels }}
+platforms: ${{ matrix.platforms }}
+# Skip pushing when PR from a fork
+push: ${{ !github.event.pull_request.head.repo.fork }}
+cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
 cache-to: ${{ steps.cache-target.outputs.cache-to }}
-cache-from: |
-type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }}
-type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-main
-outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
+tags: ${{ steps.metadata.outputs.tags }}
+labels: ${{ steps.metadata.outputs.labels }}
 build-args: |
-DEVICE=cpu
+DEVICE=${{ matrix.device }}
 BUILD_ID=${{ github.run_id }}
 BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
 BUILD_SOURCE_REF=${{ github.ref_name }}
 BUILD_SOURCE_COMMIT=${{ github.sha }}
-- name: Export digest
-run: |
-mkdir -p ${{ runner.temp }}/digests
-digest="${{ steps.build.outputs.digest }}"
-touch "${{ runner.temp }}/digests/${digest#sha256:}"
-- name: Upload digest
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
-with:
-name: server-digests-${{ env.PLATFORM_PAIR }}
-path: ${{ runner.temp }}/digests/*
-if-no-files-found: error
-retention-days: 1
-merge_server:
-name: Merge & Push Server
-runs-on: ubuntu-latest
-if: ${{ needs.pre-job.outputs.should_run_server == 'true' && !github.event.pull_request.head.repo.fork }}
-env:
-GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
-DOCKER_REPO: altran1502/immich-server
-needs:
-- build_and_push_server
-steps:
-- name: Download digests
-uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
-with:
-path: ${{ runner.temp }}/digests
-pattern: server-digests-*
-merge-multiple: true
-- name: Login to Docker Hub
-if: ${{ github.event_name == 'release' }}
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-with:
-username: ${{ secrets.DOCKERHUB_USERNAME }}
-password: ${{ secrets.DOCKERHUB_TOKEN }}
-- name: Login to GHCR
-uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
-with:
-registry: ghcr.io
-username: ${{ github.repository_owner }}
-password: ${{ secrets.GITHUB_TOKEN }}
-- name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
-- name: Generate docker image tags
-id: meta
-uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
-env:
-DOCKER_METADATA_PR_HEAD_SHA: 'true'
-with:
-flavor: |
-# Disable latest tag
-latest=false
-suffix=${{ matrix.suffix }}
-images: |
-name=${{ env.GHCR_REPO }}
-name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
-tags: |
-# Tag with branch name
-type=ref,event=branch
-# Tag with pr-number
-type=ref,event=pr
-# Tag with long commit sha hash
-type=sha,format=long,prefix=commit-
-# Tag with git tag on release
-type=ref,event=tag
-type=raw,value=release,enable=${{ github.event_name == 'release' }}
-- name: Create manifest list and push
-working-directory: ${{ runner.temp }}/digests
-run: |
-docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
-$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
 success-check-server:
 name: Docker Build & Push Server Success
-needs: [merge_server, retag_server]
+needs: [build_and_push_server, retag_server]
 runs-on: ubuntu-latest
 if: always()
 steps:
@@ -459,7 +297,7 @@ jobs:
 success-check-ml:
 name: Docker Build & Push ML Success
-needs: [merge_ml, retag_ml]
+needs: [build_and_push_ml, retag_ml]
 runs-on: ubuntu-latest
 if: always()
 steps:
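
The longer side of this file splits each image into per-platform builds that push by digest (push-by-digest=true), export the digests as artifacts, and let a merge job assemble them into one multi-arch tag with docker buildx imagetools create, as the Create manifest list and push step shows. Stripped of the jq templating, the assembly amounts to something like this sketch (repository and digest values illustrative):

# combine per-platform images, addressed by digest, under a single multi-arch tag
docker buildx imagetools create \
  -t ghcr.io/immich-app/immich-server:main \
  ghcr.io/immich-app/immich-server@sha256:<amd64-digest> \
  ghcr.io/immich-app/immich-server@sha256:<arm64-digest>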


@@ -3,32 +3,31 @@ on:
 push:
 branches: [main]
 pull_request:
+branches: [main]
 release:
 types: [published]
 concurrency:
 group: ${{ github.workflow }}-${{ github.ref }}
-cancel-in-progress: true
+cancel-in-progress: false
 jobs:
 pre-job:
 runs-on: ubuntu-latest
 outputs:
 should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - id: found_paths
-uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
+uses: dorny/paths-filter@v3
 with:
 filters: |
 docs:
 - 'docs/**'
-workflow:
-- '.github/workflows/docs-build.yml'
 - name: Check if we should force jobs to run
 id: should_force
-run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
+run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
 build:
 name: Docs Build
@@ -41,10 +40,10 @@ jobs:
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - name: Setup Node
-uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
+uses: actions/setup-node@v4
 with:
 node-version-file: './docs/.nvmrc'
@@ -58,7 +57,7 @@ jobs:
 run: npm run build
 - name: Upload build output
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
+uses: actions/upload-artifact@v4
 with:
 name: docs-build-output
 path: docs/build/
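
This hunk is where the commit message at the top lands: the cancel-in-progress flag of the workflow-level concurrency group flips between true and false, and "do not cancel running docs builds" corresponds to the false setting, where a newly queued run waits for the in-flight one instead of killing it. The semantics, sketched:

concurrency:
  # runs sharing a group are serialized
  group: ${{ github.workflow }}-${{ github.ref }}
  # false: queue behind a running build; true: cancel it and start over
  cancel-in-progress: false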


@@ -1,7 +1,7 @@
 name: Docs deploy
 on:
 workflow_run:
-workflows: ['Docs build']
+workflows: ["Docs build"]
 types:
 - completed
@@ -17,7 +17,7 @@ jobs:
 run: echo 'The triggering workflow did not succeed' && exit 1
 - name: Get artifact
 id: get-artifact
-uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
+uses: actions/github-script@v7
 with:
 script: |
 let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
@@ -35,7 +35,7 @@ jobs:
 return { found: true, id: matchArtifact.id };
 - name: Determine deploy parameters
 id: parameters
-uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
+uses: actions/github-script@v7
 with:
 script: |
 const eventType = context.payload.workflow_run.event;
@@ -98,11 +98,11 @@ jobs:
 if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - name: Load parameters
 id: parameters
-uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
+uses: actions/github-script@v7
 with:
 script: |
 const json = `${{ needs.checks.outputs.parameters }}`;
@@ -115,7 +115,7 @@ jobs:
 echo "Starting docs deployment for ${{ steps.parameters.outputs.event }} ${{ steps.parameters.outputs.name }}"
 - name: Download artifact
-uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
+uses: actions/github-script@v7
 with:
 script: |
 let artifact = ${{ needs.checks.outputs.artifact }};
@@ -138,12 +138,12 @@ jobs:
 CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
 CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
 TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
-uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
+uses: gruntwork-io/terragrunt-action@v2
 with:
-tg_version: '0.58.12'
-tofu_version: '1.7.1'
-tg_dir: 'deployment/modules/cloudflare/docs'
-tg_command: 'apply'
+tg_version: "0.58.12"
+tofu_version: "1.7.1"
+tg_dir: "deployment/modules/cloudflare/docs"
+tg_command: "apply"
 - name: Deploy Docs Subdomain Output
 id: docs-output
@@ -153,12 +153,12 @@ jobs:
 CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
 CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
 TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
-uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
+uses: gruntwork-io/terragrunt-action@v2
 with:
-tg_version: '0.58.12'
-tofu_version: '1.7.1'
-tg_dir: 'deployment/modules/cloudflare/docs'
-tg_command: 'output -json'
+tg_version: "0.58.12"
+tofu_version: "1.7.1"
+tg_dir: "deployment/modules/cloudflare/docs"
+tg_command: "output -json"
 - name: Output Cleaning
 id: clean
@@ -167,13 +167,13 @@ jobs:
 echo "output=$TG_OUT" >> $GITHUB_OUTPUT
 - name: Publish to Cloudflare Pages
-uses: cloudflare/pages-action@f0a1cd58cd66095dee69bfa18fa5efd1dde93bca # v1
+uses: cloudflare/pages-action@v1
 with:
 apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN_PAGES_UPLOAD }}
 accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
 projectName: ${{ fromJson(steps.clean.outputs.output).pages_project_name.value }}
-workingDirectory: 'docs'
-directory: 'build'
+workingDirectory: "docs"
+directory: "build"
 branch: ${{ steps.parameters.outputs.name }}
 wranglerVersion: '3'
@@ -184,7 +184,7 @@ jobs:
 CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
 CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
 TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
-uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
+uses: gruntwork-io/terragrunt-action@v2
 with:
 tg_version: '0.58.12'
 tofu_version: '1.7.1'
@@ -192,7 +192,7 @@ jobs:
 tg_command: 'apply'
 - name: Comment
-uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
+uses: actions-cool/maintain-one-comment@v3
 if: ${{ steps.parameters.outputs.event == 'pr' }}
 with:
 number: ${{ fromJson(needs.checks.outputs.parameters).pr_number }}


@@ -9,24 +9,24 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout code
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 - name: Destroy Docs Subdomain
 env:
-TF_VAR_prefix_name: 'pr-${{ github.event.number }}'
-TF_VAR_prefix_event_type: 'pr'
+TF_VAR_prefix_name: "pr-${{ github.event.number }}"
+TF_VAR_prefix_event_type: "pr"
 CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
 CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
 TF_STATE_POSTGRES_CONN_STR: ${{ secrets.TF_STATE_POSTGRES_CONN_STR }}
-uses: gruntwork-io/terragrunt-action@9559e51d05873b0ea467c42bbabcb5c067642ccc # v2
+uses: gruntwork-io/terragrunt-action@v2
 with:
-tg_version: '0.58.12'
-tofu_version: '1.7.1'
-tg_dir: 'deployment/modules/cloudflare/docs'
-tg_command: 'destroy -refresh=false'
+tg_version: "0.58.12"
+tofu_version: "1.7.1"
+tg_dir: "deployment/modules/cloudflare/docs"
+tg_command: "destroy"
 - name: Comment
-uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3
+uses: actions-cool/maintain-one-comment@v3
 with:
 number: ${{ github.event.number }}
 delete: true


@@ -1,51 +0,0 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@af35edadc00be37caa72ed9f3e6d5f7801bfdf09 # v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})


@@ -9,14 +9,13 @@ jobs:
 runs-on: ubuntu-latest
 permissions:
 issues: write
-pull-requests: write
+pull-requests: read
 steps:
 - name: Require PR to have a changelog label
-uses: mheap/github-action-required-labels@388fd6af37b34cdfe5a23b37060e763217e58b03 # v5
+uses: mheap/github-action-required-labels@v5
 with:
 mode: exactly
 count: 1
 use_regex: true
-labels: 'changelog:.*'
+labels: "changelog:.*"
 add_comment: true
-message: 'Label error. Requires {{errorString}} {{count}} of: {{ provided }}. Found: {{ applied }}. A maintainer will add the required label.'


@@ -1,6 +1,6 @@
-name: 'Pull Request Labeler'
+name: "Pull Request Labeler"
 on:
 - pull_request_target
 jobs:
 labeler:
@@ -9,4 +9,4 @@ jobs:
 pull-requests: write
 runs-on: ubuntu-latest
 steps:
-- uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
+- uses: actions/labeler@v5


@@ -9,7 +9,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: PR Conventional Commit Validation
-uses: ytanikin/PRConventionalCommits@b628c5a234cc32513014b7bfdd1e47b532124d98 # 1.3.0
+uses: ytanikin/PRConventionalCommits@1.2.0
 with:
 task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
 add_label: 'false'


@@ -31,25 +31,25 @@ jobs:
 steps:
 - name: Generate a token
 id: generate-token
-uses: actions/create-github-app-token@af35edadc00be37caa72ed9f3e6d5f7801bfdf09 # v1
+uses: actions/create-github-app-token@v1
 with:
 app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
 private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 with:
 token: ${{ steps.generate-token.outputs.token }}
-- name: Install uv
-uses: astral-sh/setup-uv@22695119d769bdb6f7032ad67b9bca0ef8c4a174 # v5
+- name: Install Poetry
+run: pipx install poetry
 - name: Bump version
 run: misc/release/pump-version.sh -s "${{ inputs.serverBump }}" -m "${{ inputs.mobileBump }}"
 - name: Commit and tag
 id: push-tag
-uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9
+uses: EndBug/add-and-commit@v9
 with:
 default_author: github_actions
 message: 'chore: version ${{ env.IMMICH_VERSION }}'
@@ -68,25 +68,18 @@ jobs:
 needs: build_mobile
 steps:
-- name: Generate a token
-id: generate-token
-uses: actions/create-github-app-token@af35edadc00be37caa72ed9f3e6d5f7801bfdf09 # v1
-with:
-app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
-private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
 - name: Checkout
-uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+uses: actions/checkout@v4
 with:
-token: ${{ steps.generate-token.outputs.token }}
+token: ${{ secrets.ORG_RELEASE_TOKEN }}
 - name: Download APK
-uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4
+uses: actions/download-artifact@v4
 with:
 name: release-apk-signed
 - name: Create draft release
-uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
+uses: softprops/action-gh-release@v2
 with:
 draft: true
 tag_name: ${{ env.IMMICH_VERSION }}

View File

@@ -1,33 +0,0 @@
name: Preview label
on:
pull_request:
types: [labeled, closed]
jobs:
comment-status:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'labeled' && github.event.label.name == 'preview' }}
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2
with:
message-id: 'preview-status'
message: 'Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/'
remove-label:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'preview') }}
permissions:
pull-requests: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'preview'
})

View File

@@ -15,9 +15,9 @@ jobs:
run: run:
working-directory: ./open-api/typescript-sdk working-directory: ./open-api/typescript-sdk
steps: steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - uses: actions/checkout@v4
# Setup .npmrc file to publish to npm # Setup .npmrc file to publish to npm
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 - uses: actions/setup-node@v4
with: with:
node-version-file: './open-api/typescript-sdk/.nvmrc' node-version-file: './open-api/typescript-sdk/.nvmrc'
registry-url: 'https://registry.npmjs.org' registry-url: 'https://registry.npmjs.org'

View File

@@ -16,18 +16,16 @@ jobs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }} should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- id: found_paths - id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3 uses: dorny/paths-filter@v3
with: with:
filters: | filters: |
mobile: mobile:
- 'mobile/**' - 'mobile/**'
workflow:
- '.github/workflows/static_analysis.yml'
- name: Check if we should force jobs to run - name: Check if we should force jobs to run
id: should_force id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT" run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze: mobile-dart-analyze:
name: Run Dart Code Analysis name: Run Dart Code Analysis
@@ -38,10 +36,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Flutter SDK - name: Setup Flutter SDK
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2 uses: subosito/flutter-action@v2
with: with:
channel: 'stable' channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml flutter-version-file: ./mobile/pubspec.yaml
@@ -50,26 +48,6 @@ jobs:
run: dart pub get run: dart pub get
working-directory: ./mobile working-directory: ./mobile
- name: Run Build Runner
run: make build
working-directory: ./mobile
- name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20
id: verify-changed-files
with:
files: |
mobile/**/*.g.dart
mobile/**/*.gr.dart
mobile/**/*.drift.dart
- name: Verify files have not changed
if: steps.verify-changed-files.outputs.files_changed == 'true'
run: |
echo "ERROR: Generated files not up to date! Run make_build inside the mobile directory"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
exit 1
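These steps form a codegen-drift gate: regenerate, then fail the build if any tracked generated file changed. The same gate as a standalone sketch (the `git status` filtering here is an assumption, not taken from the workflow):

```ts
import { execSync } from 'node:child_process';

// Codegen-drift gate: run the generator, then fail if any generated
// Dart file differs from what is committed.
execSync('make build', { cwd: 'mobile', stdio: 'inherit' });
const changed = execSync('git status --porcelain', { encoding: 'utf8' })
  .split('\n')
  .filter((line) => /\.(g|gr|drift)\.dart$/.test(line));
if (changed.length > 0) {
  console.error('ERROR: Generated files not up to date! Run make build inside the mobile directory');
  console.error(changed.join('\n'));
  process.exit(1);
}
```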
- name: Run dart analyze - name: Run dart analyze
run: dart analyze --fatal-infos run: dart analyze --fatal-infos
working-directory: ./mobile working-directory: ./mobile
@@ -81,3 +59,8 @@ jobs:
- name: Run dart custom_lint - name: Run dart custom_lint
run: dart run custom_lint run: dart run custom_lint
working-directory: ./mobile working-directory: ./mobile
# Enable after riverpod generator migration is completed
# - name: Run dart custom lint
# run: dart run custom_lint
# working-directory: ./mobile

View File

@@ -21,17 +21,15 @@ jobs:
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }} should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }} should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }} should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_.github: ${{ steps.found_paths.outputs['.github'] == 'true' || steps.should_force.outputs.should_force == 'true' }} # should_force is redundant today, but keeps this output correct if the trigger ever changes
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- id: found_paths - id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3 uses: dorny/paths-filter@v3
with: with:
filters: | filters: |
web: web:
- 'web/**' - 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**' - 'open-api/typescript-sdk/**'
server: server:
- 'server/**' - 'server/**'
@@ -44,14 +42,10 @@ jobs:
- 'mobile/**' - 'mobile/**'
machine-learning: machine-learning:
- 'machine-learning/**' - 'machine-learning/**'
workflow:
- '.github/workflows/test.yml'
.github:
- '.github/**'
- name: Check if we should force jobs to run - name: Check if we should force jobs to run
id: should_force id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT" run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests: server-unit-tests:
name: Test & Lint Server name: Test & Lint Server
@@ -64,10 +58,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './server/.nvmrc' node-version-file: './server/.nvmrc'
@@ -86,7 +80,7 @@ jobs:
run: npm run check run: npm run check
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
- name: Run small tests & coverage - name: Run unit tests & coverage
run: npm run test:cov run: npm run test:cov
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
@@ -101,10 +95,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './cli/.nvmrc' node-version-file: './cli/.nvmrc'
@@ -142,10 +136,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './cli/.nvmrc' node-version-file: './cli/.nvmrc'
@@ -176,10 +170,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './web/.nvmrc' node-version-file: './web/.nvmrc'
@@ -221,10 +215,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './e2e/.nvmrc' node-version-file: './e2e/.nvmrc'
@@ -249,31 +243,6 @@ jobs:
run: npm run check run: npm run check
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
server-medium-tests:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./server
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './server/.nvmrc'
- name: Run npm install
run: npm ci
- name: Run medium tests
run: npm run test:medium
if: ${{ !cancelled() }}
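The job runs `npm run test:medium`, which points at a separate `vitest.config.medium.mjs`. A hypothetical sketch of what such a config separates out (the actual file contents are not shown in this diff):

```ts
// Hypothetical vitest.config.medium.mjs: medium tests get their own include
// pattern and longer timeouts, since they exercise real services.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    include: ['test/medium/**/*.spec.ts'], // assumed layout
    testTimeout: 10_000,
  },
});
```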
e2e-tests-server-cli: e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI) name: End-to-End Tests (Server & CLI)
needs: pre-job needs: pre-job
@@ -285,12 +254,12 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
with: with:
submodules: 'recursive' submodules: 'recursive'
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './e2e/.nvmrc' node-version-file: './e2e/.nvmrc'
@@ -327,12 +296,12 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
with: with:
submodules: 'recursive' submodules: 'recursive'
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './e2e/.nvmrc' node-version-file: './e2e/.nvmrc'
@@ -363,9 +332,9 @@ jobs:
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }} if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - uses: actions/checkout@v4
- name: Setup Flutter SDK - name: Setup Flutter SDK
uses: subosito/flutter-action@e938fdf56512cc96ef2f93601a5a40bde3801046 # v2 uses: subosito/flutter-action@v2
with: with:
channel: 'stable' channel: 'stable'
flutter-version-file: ./mobile/pubspec.yaml flutter-version-file: ./mobile/pubspec.yaml
@@ -382,60 +351,34 @@ jobs:
run: run:
working-directory: ./machine-learning working-directory: ./machine-learning
steps: steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - uses: actions/checkout@v4
- name: Install uv - name: Install poetry
uses: astral-sh/setup-uv@22695119d769bdb6f7032ad67b9bca0ef8c4a174 # v5 run: pipx install poetry
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5 - uses: actions/setup-python@v5
# TODO: add caching when supported (https://github.com/actions/setup-python/pull/818) with:
# with: python-version: 3.11
# python-version: 3.11 cache: 'poetry'
# cache: 'uv'
- name: Install dependencies - name: Install dependencies
run: | run: |
uv sync --extra cpu poetry install --with dev --with cpu
- name: Lint with ruff - name: Lint with ruff
run: | run: |
uv run ruff check --output-format=github app export poetry run ruff check --output-format=github app export
- name: Check black formatting - name: Check black formatting
run: | run: |
uv run black --check app export poetry run black --check app export
- name: Run mypy type checking - name: Run mypy type checking
run: | run: |
uv run mypy --strict app/ poetry run mypy --install-types --non-interactive --strict app/
- name: Run tests and coverage - name: Run tests and coverage
run: | run: |
uv run pytest app --cov=app --cov-report term-missing poetry run pytest app --cov=app --cov-report term-missing
github-files-formatting:
name: .github Files Formatting
needs: pre-job
if: ${{ needs.pre-job.outputs['should_run_.github'] == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./.github
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4
with:
node-version-file: './.github/.nvmrc'
- name: Run npm install
run: npm ci
- name: Run formatter
run: npm run format
if: ${{ !cancelled() }}
shellcheck: shellcheck:
name: ShellCheck name: ShellCheck
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - uses: actions/checkout@v4
- name: Run ShellCheck - name: Run ShellCheck
uses: ludeeus/action-shellcheck@master uses: ludeeus/action-shellcheck@master
with: with:
@@ -449,10 +392,10 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './server/.nvmrc' node-version-file: './server/.nvmrc'
@@ -466,7 +409,7 @@ jobs:
run: make open-api run: make open-api
- name: Find file changes - name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20 uses: tj-actions/verify-changed-files@v20
id: verify-changed-files id: verify-changed-files
with: with:
files: | files: |
@@ -486,7 +429,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
services: services:
postgres: postgres:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52 image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env: env:
POSTGRES_PASSWORD: postgres POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres POSTGRES_USER: postgres
@@ -504,10 +447,10 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 uses: actions/checkout@v4
- name: Setup Node - name: Setup Node
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4 uses: actions/setup-node@v4
with: with:
node-version-file: './server/.nvmrc' node-version-file: './server/.nvmrc'
@@ -528,7 +471,7 @@ jobs:
run: npm run typeorm:migrations:generate ./src/migrations/TestMigration run: npm run typeorm:migrations:generate ./src/migrations/TestMigration
- name: Find file changes - name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20 uses: tj-actions/verify-changed-files@v20
id: verify-changed-files id: verify-changed-files
with: with:
files: | files: |
@@ -538,7 +481,6 @@ jobs:
run: | run: |
echo "ERROR: Generated migration files not up to date!" echo "ERROR: Generated migration files not up to date!"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}" echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
cat ./src/migrations/*-TestMigration.ts
exit 1 exit 1
- name: Run SQL generation - name: Run SQL generation
@@ -547,7 +489,7 @@ jobs:
DB_URL: postgres://postgres:postgres@localhost:5432/immich DB_URL: postgres://postgres:postgres@localhost:5432/immich
- name: Find file changes - name: Find file changes
uses: tj-actions/verify-changed-files@a1c6acee9df209257a246f2cc6ae8cb6581c1edf # v20 uses: tj-actions/verify-changed-files@v20
id: verify-changed-sql-files id: verify-changed-sql-files
with: with:
files: | files: |

View File

@@ -1,57 +0,0 @@
name: Weblate checks
on:
pull_request:
branches: [main]
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- id: found_paths
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3
with:
filters: |
i18n:
- 'i18n/!(en)**\.json'
- name: Debug
run: |
echo "Should run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}"
echo "Found i18n paths: ${{ steps.found_paths.outputs.i18n }}"
echo "Head ref: ${{ github.head_ref }}"
enforce-lock:
name: Check Weblate Lock
needs: [pre-job]
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
steps:
- name: Check weblate lock
run: |
if [[ "false" = $(curl https://hosted.weblate.org/api/components/immich/immich/lock/ | jq .locked) ]]; then
exit 1
fi
- name: Find Pull Request
uses: juliangruber/find-pull-request-action@48b6133aa6c826f267ebd33aa2d29470f9d9e7d0 # v1
id: find-pr
with:
branch: chore/translations
- name: Fail if existing weblate PR
if: ${{ steps.find-pr.outputs.number }}
run: exit 1
success-check-lock:
name: Weblate Lock Check Success
needs: [enforce-lock]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"

2
.gitignore vendored
View File

@@ -21,5 +21,3 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build open-api/typescript-sdk/build
mobile/android/fastlane/report.xml mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*

View File

@@ -41,4 +41,4 @@
"explorer.fileNesting.patterns": { "explorer.fileNesting.patterns": {
"*.ts": "${capture}.spec.ts,${capture}.mock.ts" "*.ts": "${capture}.spec.ts,${capture}.mock.ts"
} }
} }

View File

@@ -39,7 +39,7 @@ attach-server:
renovate: renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk docs .github MODULES = e2e server web cli sdk
audit-%: audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
@@ -48,9 +48,11 @@ install-%:
build-cli: build-sdk build-cli: build-sdk
build-web: build-sdk build-web: build-sdk
build-%: install-% build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
format-%: format-%:
npm --prefix $* run format:fix npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
lint-%: lint-%:
npm --prefix $* run lint:fix npm --prefix $* run lint:fix
check-%: check-%:
@@ -64,27 +66,15 @@ test-e2e:
docker compose -f ./e2e/docker-compose.yml build docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test npm --prefix e2e run test
npm --prefix e2e run test:web npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(filter-out e2e .github,$(MODULES)),build-$M) ; build-all: $(foreach M,$(MODULES),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ; install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs .github,$(MODULES)),check-$M) ; check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),lint-$M) ; lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ; format-all: $(foreach M,$(MODULES),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ; audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all; hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(filter-out sdk docs .github,$(MODULES)),test-$M) ; test-all: $(foreach M,$(MODULES),test-$M) ;
clean: clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' + find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
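The new `build-%` and `format-%` recipes guard on `npm run | grep '<script>'`: with no script name, `npm run` lists the scripts a package defines, so modules lacking a script become a no-op instead of an error. The same guard, sketched in TypeScript for readability (the Makefile is the real implementation):

```ts
import { execSync } from 'node:child_process';

// `npm run` with no arguments prints the available scripts, so searching that
// output tells us whether the target script exists before invoking it.
const hasScript = (prefix: string, script: string): boolean =>
  execSync(`npm --prefix ${prefix} run`, { encoding: 'utf8' }).includes(script);

if (hasScript('./cli', 'build')) {
  execSync('npm --prefix ./cli run build', { stdio: 'inherit' });
}
```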

View File

@@ -1,11 +1,11 @@
<p align="center"> <p align="center">
<br/> <br/>
<a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a> <a href="https://opensource.org/license/agpl-v3"><img src="https://img.shields.io/badge/License-AGPL_v3-blue.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: AGPLv3"></a>
<a href="https://discord.immich.app"> <a href="https://discord.immich.app">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/> <img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a> </a>
<br/> <br/>
<br/> <br/>
</p> </p>
<p align="center"> <p align="center">
@@ -17,25 +17,24 @@
<img src="design/immich-screenshots.png" title="Main Screenshot"> <img src="design/immich-screenshots.png" title="Main Screenshot">
</a> </a>
<br/> <br/>
<p align="center"> <p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a> <a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_fr_FR.md">Français</a> <a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a> <a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a> <a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a> <a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a> <a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a> <a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a> <a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_zh_CN.md">中文</a> <a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_uk_UA.md">Українська</a> <a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a> <a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a> <a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a> <a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a> <a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a> <a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
</p> </p>
## Disclaimer ## Disclaimer
@@ -63,7 +62,7 @@
Access the demo [here](https://demo.immich.app). The demo is running on a Free-tier Oracle VM in Amsterdam with a 2.4GHz quad-core ARM64 CPU and 24GB RAM. Access the demo [here](https://demo.immich.app). The demo is running on a Free-tier Oracle VM in Amsterdam with a 2.4GHz quad-core ARM64 CPU and 24GB RAM.
For the mobile app, you can use `https://demo.immich.app` for the `Server Endpoint URL` For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`
### Login credentials ### Login credentials
@@ -103,8 +102,6 @@ For the mobile app, you can use `https://demo.immich.app` for the `Server Endpoi
| Offline support | Yes | No | | Offline support | Yes | No |
| Read-only gallery | Yes | Yes | | Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes | | Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations ## Translations

View File

@@ -1 +1 @@
22.14.0 20.17.0

View File

@@ -1,4 +1,4 @@
FROM node:22.14.0-alpine3.20@sha256:40be979442621049f40b1d51a26b55e281246b5de4e5f51a18da7beb6e17e3f9 AS core FROM node:20.17.0-alpine3.20@sha256:2d07db07a2df6830718ae2a47db6fedce6745f5bcd174c398f2acdda90a11c03 AS core
WORKDIR /usr/src/open-api/typescript-sdk WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./ COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

1595
cli/package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{ {
"name": "@immich/cli", "name": "@immich/cli",
"version": "2.2.57", "version": "2.2.22",
"description": "Command Line Interface (CLI) for Immich", "description": "Command Line Interface (CLI) for Immich",
"type": "module", "type": "module",
"exports": "./dist/index.js", "exports": "./dist/index.js",
@@ -19,28 +19,27 @@
"@types/byte-size": "^8.1.0", "@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0", "@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12", "@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1", "@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.10", "@types/node": "^20.16.9",
"@typescript-eslint/eslint-plugin": "^8.15.0", "@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.15.0", "@typescript-eslint/parser": "^8.0.0",
"@vitest/coverage-v8": "^3.0.0", "@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0", "byte-size": "^9.0.0",
"cli-progress": "^3.12.0", "cli-progress": "^3.12.0",
"commander": "^12.0.0", "commander": "^12.0.0",
"eslint": "^9.14.0", "eslint": "^9.0.0",
"eslint-config-prettier": "^10.0.0", "eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3", "eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1", "eslint-plugin-unicorn": "^55.0.0",
"globals": "^16.0.0", "globals": "^15.9.0",
"mock-fs": "^5.2.0", "mock-fs": "^5.2.0",
"prettier": "^3.2.5", "prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0", "prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3", "typescript": "^5.3.3",
"vite": "^6.0.0", "vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0", "vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0", "vitest": "^2.0.5",
"vitest-fetch-mock": "^0.4.0", "vitest-fetch-mock": "^0.3.0",
"yaml": "^2.3.1" "yaml": "^2.3.1"
}, },
"scripts": { "scripts": {
@@ -63,13 +62,11 @@
"node": ">=20.0.0" "node": ">=20.0.0"
}, },
"dependencies": { "dependencies": {
"chokidar": "^4.0.3",
"fast-glob": "^3.3.2", "fast-glob": "^3.3.2",
"fastq": "^1.17.1", "fastq": "^1.17.1",
"lodash-es": "^4.17.21", "lodash-es": "^4.17.21"
"micromatch": "^4.0.8"
}, },
"volta": { "volta": {
"node": "22.14.0" "node": "20.17.0"
} }
} }

View File

@@ -1,13 +1,12 @@
import * as fs from 'node:fs'; import * as fs from 'node:fs';
import * as os from 'node:os'; import * as os from 'node:os';
import * as path from 'node:path'; import * as path from 'node:path';
import { setTimeout as sleep } from 'node:timers/promises'; import { describe, expect, it, vi } from 'vitest';
import { describe, expect, it, MockedFunction, vi } from 'vitest';
import { Action, checkBulkUpload, defaults, getSupportedMediaTypes, Reason } from '@immich/sdk'; import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock'; import createFetchMock from 'vitest-fetch-mock';
import { checkForDuplicates, getAlbumName, startWatch, uploadFiles, UploadOptionsDto } from 'src/commands/asset'; import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
vi.mock('@immich/sdk'); vi.mock('@immich/sdk');
@@ -200,112 +199,3 @@ describe('checkForDuplicates', () => {
}); });
}); });
}); });
describe('startWatch', () => {
let testFolder: string;
let checkBulkUploadMocked: MockedFunction<typeof checkBulkUpload>;
beforeEach(async () => {
vi.restoreAllMocks();
vi.mocked(getSupportedMediaTypes).mockResolvedValue({
image: ['.jpg'],
sidecar: ['.xmp'],
video: ['.mp4'],
});
testFolder = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'test-startWatch-'));
checkBulkUploadMocked = vi.mocked(checkBulkUpload);
checkBulkUploadMocked.mockResolvedValue({
results: [],
});
});
it('should start watching a directory and upload new files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // brief pause so the watcher does not treat the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
expect.objectContaining({
id: testFilePath,
}),
],
},
});
});
it('should filter out unsupported files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const unsupportedFilePath = path.join(testFolder, 'test.txt');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // brief pause so the watcher does not treat the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(unsupportedFilePath, 'testtxt');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: unsupportedFilePath,
}),
]),
},
});
});
it('should filter out ignored patterns', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const ignoredPattern = 'ignored';
const ignoredFolder = path.join(testFolder, ignoredPattern);
await fs.promises.mkdir(ignoredFolder, { recursive: true });
const ignoredFilePath = path.join(ignoredFolder, 'ignored.jpg');
await startWatch([testFolder], { concurrency: 1, ignore: ignoredPattern }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // brief pause so the watcher does not treat the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(ignoredFilePath, 'ignoredjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: ignoredFilePath,
}),
]),
},
});
});
afterEach(async () => {
await fs.promises.rm(testFolder, { recursive: true, force: true });
});
});

View File

@@ -1,6 +1,5 @@
import { import {
Action, Action,
AssetBulkUploadCheckItem,
AssetBulkUploadCheckResult, AssetBulkUploadCheckResult,
AssetMediaResponseDto, AssetMediaResponseDto,
AssetMediaStatus, AssetMediaStatus,
@@ -12,18 +11,13 @@ import {
getSupportedMediaTypes, getSupportedMediaTypes,
} from '@immich/sdk'; } from '@immich/sdk';
import byteSize from 'byte-size'; import byteSize from 'byte-size';
import { Matcher, watch as watchFs } from 'chokidar'; import { Presets, SingleBar } from 'cli-progress';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es'; import { chunk } from 'lodash-es';
import micromatch from 'micromatch';
import { Stats, createReadStream } from 'node:fs'; import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises'; import { stat, unlink } from 'node:fs/promises';
import path, { basename } from 'node:path'; import path, { basename } from 'node:path';
import { Queue } from 'src/queue'; import { Queue } from 'src/queue';
import { BaseOptions, Batcher, authenticate, crawl, sha1 } from 'src/utils'; import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
const UPLOAD_WATCH_BATCH_SIZE = 100;
const UPLOAD_WATCH_DEBOUNCE_TIME_MS = 10_000;
const s = (count: number) => (count === 1 ? '' : 's'); const s = (count: number) => (count === 1 ? '' : 's');
@@ -41,8 +35,6 @@ export interface UploadOptionsDto {
albumName?: string; albumName?: string;
includeHidden?: boolean; includeHidden?: boolean;
concurrency: number; concurrency: number;
progress?: boolean;
watch?: boolean;
} }
class UploadFile extends File { class UploadFile extends File {
@@ -62,94 +54,19 @@ class UploadFile extends File {
} }
} }
const uploadBatch = async (files: string[], options: UploadOptionsDto) => {
const { newFiles, duplicates } = await checkForDuplicates(files, options);
const newAssets = await uploadFiles(newFiles, options);
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(newFiles, options);
};
export const startWatch = async (
paths: string[],
options: UploadOptionsDto,
{
batchSize = UPLOAD_WATCH_BATCH_SIZE,
debounceTimeMs = UPLOAD_WATCH_DEBOUNCE_TIME_MS,
}: { batchSize?: number; debounceTimeMs?: number } = {},
) => {
const watcherIgnored: Matcher[] = [];
const { image, video } = await getSupportedMediaTypes();
const extensions = new Set([...image, ...video]);
if (options.ignore) {
watcherIgnored.push((path) => micromatch.contains(path, `**/${options.ignore}`));
}
const pathsBatcher = new Batcher<string>({
batchSize,
debounceTimeMs,
onBatch: async (paths: string[]) => {
const uniquePaths = [...new Set(paths)];
await uploadBatch(uniquePaths, options);
},
});
const onFile = async (path: string, stats?: Stats) => {
if (stats?.isDirectory()) {
return;
}
const ext = '.' + path.split('.').pop()?.toLowerCase();
if (!ext || !extensions.has(ext)) {
return;
}
if (!options.progress) {
// only log when progress is disabled, since logging can break the progress bar rendering
console.log(`Change detected: ${path}`);
}
pathsBatcher.add(path);
};
const fsWatcher = watchFs(paths, {
ignoreInitial: true,
ignored: watcherIgnored,
alwaysStat: true,
awaitWriteFinish: true,
depth: options.recursive ? undefined : 1,
persistent: true,
})
.on('add', onFile)
.on('change', onFile)
.on('error', (error) => console.error(`Watcher error: ${error}`));
process.on('SIGINT', async () => {
console.log('Exiting...');
await fsWatcher.close();
process.exit();
});
};
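`startWatch` builds on chokidar's event model; stripped of the batching and media-type filtering, the core loop looks like this (a minimal sketch, assuming chokidar v4 semantics):

```ts
import { watch } from 'chokidar';

// Minimal watch loop: skip the initial scan, react to adds and changes,
// close cleanly on Ctrl+C.
const watcher = watch(['/photos'], {
  ignoreInitial: true, // the initial pass is handled by the crawler instead
  alwaysStat: true, // stats let the handler skip directories
  awaitWriteFinish: true, // wait for copies to finish before reacting
})
  .on('add', (path) => console.log(`Change detected: ${path}`))
  .on('change', (path) => console.log(`Change detected: ${path}`))
  .on('error', (error) => console.error(`Watcher error: ${error}`));

process.on('SIGINT', async () => {
  await watcher.close();
  process.exit();
});
```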
export const upload = async (paths: string[], baseOptions: BaseOptions, options: UploadOptionsDto) => { export const upload = async (paths: string[], baseOptions: BaseOptions, options: UploadOptionsDto) => {
await authenticate(baseOptions); await authenticate(baseOptions);
const scanFiles = await scan(paths, options); const scanFiles = await scan(paths, options);
if (scanFiles.length === 0) { if (scanFiles.length === 0) {
if (options.watch) { console.log('No files found, exiting');
console.log('No files found initially.'); return;
} else {
console.log('No files found, exiting');
return;
}
} }
if (options.watch) { const { newFiles, duplicates } = await checkForDuplicates(scanFiles, options);
console.log('Watching for changes...'); const newAssets = await uploadFiles(newFiles, options);
await startWatch(paths, options); await updateAlbums([...newAssets, ...duplicates], options);
// the watcher does not handle the initial scan, await deleteFiles(newFiles, options);
// since scan() is a more efficient quick start with batched results
}
await uploadBatch(scanFiles, options);
}; };
const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => { const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
@@ -167,35 +84,29 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files; return files;
}; };
export const checkForDuplicates = async (files: string[], { concurrency, skipHash, progress }: UploadOptionsDto) => { export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) { if (skipHash) {
console.log('Skipping hash check, assuming all files are new'); console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] }; return { newFiles: files, duplicates: [] };
} }
let multiBar: MultiBar | undefined; const progressBar = new SingleBar(
{ format: 'Checking files | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
if (progress) { progressBar.start(files.length, 0);
multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
} else {
console.log(`Received ${files.length} files, hashing...`);
}
const hashProgressBar = multiBar?.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar?.create(files.length, 0, { message: 'Checking for duplicates' });
const newFiles: string[] = []; const newFiles: string[] = [];
const duplicates: Asset[] = []; const duplicates: Asset[] = [];
const checkBulkUploadQueue = new Queue<AssetBulkUploadCheckItem[], void>( const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (assets: AssetBulkUploadCheckItem[]) => { async (filepaths: string[]) => {
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets } }); const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const results = response.results as AssetBulkUploadCheckResults; const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) { for (const { id: filepath, assetId, action } of results) {
if (action === Action.Accept) { if (action === Action.Accept) {
newFiles.push(filepath); newFiles.push(filepath);
@@ -204,46 +115,19 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
duplicates.push({ id: assetId as string, filepath }); duplicates.push({ id: assetId as string, filepath });
} }
} }
progressBar.increment(filepaths.length);
checkProgressBar?.increment(assets.length);
},
{ concurrency, retry: 3 },
);
const results: { id: string; checksum: string }[] = [];
let checkBulkUploadRequests: AssetBulkUploadCheckItem[] = [];
const queue = new Queue<string, AssetBulkUploadCheckItem[]>(
async (filepath: string): Promise<AssetBulkUploadCheckItem[]> => {
const dto = { id: filepath, checksum: await sha1(filepath) };
results.push(dto);
checkBulkUploadRequests.push(dto);
if (checkBulkUploadRequests.length === 5000) {
const batch = checkBulkUploadRequests;
checkBulkUploadRequests = [];
void checkBulkUploadQueue.push(batch);
}
hashProgressBar?.increment();
return results; return results;
}, },
{ concurrency, retry: 3 }, { concurrency, retry: 3 },
); );
for (const item of files) { for (const items of chunk(files, concurrency)) {
void queue.push(item); await queue.push(items);
} }
await queue.drained(); await queue.drained();
if (checkBulkUploadRequests.length > 0) { progressBar.stop();
void checkBulkUploadQueue.push(checkBulkUploadRequests);
}
await checkBulkUploadQueue.drained();
multiBar?.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`); console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
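This hunk replaces a chunk-and-await loop with a two-queue pipeline: one queue hashes files, accumulating `AssetBulkUploadCheckItem`s, and a second fires a bulk check whenever 5000 items pile up, with a final flush for the remainder. The shape of that pipeline, reduced to a toy with a batch size of 2 and a stub hash (run as an ES module):

```ts
// Toy version of the hash-then-batch-check pipeline above.
type CheckItem = { id: string; checksum: string };

const BATCH = 2; // 5000 in the real code
let pending: CheckItem[] = [];

const bulkCheck = async (assets: CheckItem[]) => {
  // stand-in for checkBulkUpload({ assetBulkUploadCheckDto: { assets } })
  console.log('checking batch:', assets.map((a) => a.id));
};

const hash = async (filepath: string): Promise<string> => 'sha1:' + filepath; // stub

for (const filepath of ['a.jpg', 'b.jpg', 'c.jpg']) {
  pending.push({ id: filepath, checksum: await hash(filepath) });
  if (pending.length === BATCH) {
    const batch = pending;
    pending = [];
    void bulkCheck(batch);
  }
}
if (pending.length > 0) {
  await bulkCheck(pending); // final flush, as the real code does after draining
}
```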
@@ -259,10 +143,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles, duplicates }; return { newFiles, duplicates };
}; };
export const uploadFiles = async ( export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
files: string[],
{ dryRun, concurrency, progress }: UploadOptionsDto,
): Promise<Asset[]> => {
if (files.length === 0) { if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.'); console.log('All assets were already uploaded, nothing to do.');
return []; return [];
@@ -282,20 +163,12 @@ export const uploadFiles = async (
return files.map((filepath) => ({ id: '', filepath })); return files.map((filepath) => ({ id: '', filepath }));
} }
let uploadProgress: SingleBar | undefined; const uploadProgress = new SingleBar(
{ format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}' },
if (progress) { Presets.shades_classic,
uploadProgress = new SingleBar( );
{ uploadProgress.start(totalSize, 0);
format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}', uploadProgress.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
},
Presets.shades_classic,
);
} else {
console.log(`Uploading ${files.length} asset${s(files.length)} (${byteSize(totalSize)})`);
}
uploadProgress?.start(totalSize, 0);
uploadProgress?.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
let duplicateCount = 0; let duplicateCount = 0;
let duplicateSize = 0; let duplicateSize = 0;
@@ -321,20 +194,20 @@ export const uploadFiles = async (
successSize += stats.size ?? 0; successSize += stats.size ?? 0;
} }
uploadProgress?.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) }); uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
return response; return response;
}, },
{ concurrency, retry: 3 }, { concurrency, retry: 3 },
); );
for (const item of files) { for (const filepath of files) {
void queue.push(item); await queue.push(filepath);
} }
await queue.drained(); await queue.drained();
uploadProgress?.stop(); uploadProgress.stop();
console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`); console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) { if (duplicateCount > 0) {
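Throughout this file the `progress` flag makes every bar optional: the bar is constructed only when progress is enabled, and all later calls go through optional chaining so the plain-logging path stays silent. The pattern in isolation:

```ts
import { Presets, SingleBar } from 'cli-progress';

// Construct the bar only when progress is on; `?.` makes the other path a no-op.
const progress = process.stdout.isTTY; // illustrative condition
let bar: SingleBar | undefined;
if (progress) {
  bar = new SingleBar({ format: 'Uploading | {bar} | {value}/{total}' }, Presets.shades_classic);
} else {
  console.log('Uploading 100 units'); // plain-log fallback
}
bar?.start(100, 0);
bar?.increment(40);
bar?.stop();
```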

View File

@@ -69,13 +69,6 @@ program
.default(4), .default(4),
) )
.addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS')) .addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
.addOption(new Option('--no-progress', 'Hide progress bars').env('IMMICH_PROGRESS_BAR').default(true))
.addOption(
new Option('--watch', 'Watch for changes and upload automatically')
.env('IMMICH_WATCH_CHANGES')
.default(false)
.implies({ progress: false }),
)
.argument('[paths...]', 'One or more paths to assets to be uploaded') .argument('[paths...]', 'One or more paths to assets to be uploaded')
.action((paths, options) => upload(paths, program.opts(), options)); .action((paths, options) => upload(paths, program.opts(), options));
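The `--watch` option's `.implies({ progress: false })` is the interesting bit: watcher log lines would garble an active progress bar, so enabling watch disables progress unless the user overrides it. A self-contained demonstration with commander:

```ts
import { Command, Option } from 'commander';

// --watch implies --no-progress so watcher logs don't fight the progress bar.
const program = new Command()
  .addOption(new Option('--no-progress', 'Hide progress bars').default(true))
  .addOption(
    new Option('--watch', 'Watch for changes and upload automatically')
      .default(false)
      .implies({ progress: false }),
  );

program.parse(['node', 'immich', '--watch']);
console.log(program.opts()); // { progress: false, watch: true }
```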

View File

@@ -72,8 +72,8 @@ export class Queue<T, R> {
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker. * @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to a `unhandledRejection`. * This promise could be ignored as it will not lead to a `unhandledRejection`.
*/ */
drained(): Promise<void> { async drained(): Promise<void> {
return this.queue.drained(); await this.queue.drain();
} }
/** /**
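The one-line change in `drained()` matters: with fastq's promise API, `queue.drained()` resolves once every pushed task has been processed, which is exactly the synchronization point the uploader relies on after pushing tasks without awaiting them. A minimal sketch (run as an ES module):

```ts
import { promise as fastq } from 'fastq';

// drained() is the sync point after fire-and-forget pushes.
const queue = fastq(async (n: number) => {
  console.log('processed', n);
}, 2);

for (const n of [1, 2, 3]) {
  void queue.push(n); // intentionally unawaited
}
await queue.drained();
console.log('queue empty');
```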

View File

@@ -1,13 +1,11 @@
import mockfs from 'mock-fs'; import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs'; import { readFileSync } from 'node:fs';
import { Batcher, CrawlOptions, crawl } from 'src/utils'; import { CrawlOptions, crawl } from 'src/utils';
import { Mock } from 'vitest';
interface Test { interface Test {
test: string; test: string;
options: Omit<CrawlOptions, 'extensions'>; options: Omit<CrawlOptions, 'extensions'>;
files: Record<string, boolean>; files: Record<string, boolean>;
skipOnWin32?: boolean;
} }
const cwd = process.cwd(); const cwd = process.cwd();
@@ -50,18 +48,6 @@ const tests: Test[] = [
'/photos/image.jpg': true, '/photos/image.jpg': true,
}, },
}, },
{
test: 'should crawl folders with quotes',
options: {
pathsToCrawl: ["/photo's/", '/photo"s/', '/photo`s/'],
},
files: {
"/photo's/image1.jpg": true,
'/photo"s/image2.jpg': true,
'/photo`s/image3.jpg': true,
},
skipOnWin32: true, // single quote interferes with mockfs root on Windows
},
{ {
test: 'should crawl a single file', test: 'should crawl a single file',
options: { options: {
@@ -129,7 +115,17 @@ const tests: Test[] = [
'/albums/image3.jpg': true, '/albums/image3.jpg': true,
}, },
}, },
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{ {
test: 'should crawl a single path without trailing slash', test: 'should crawl a single path without trailing slash',
options: { options: {
@@ -284,12 +280,8 @@ describe('crawl', () => {
}); });
describe('crawl', () => { describe('crawl', () => {
for (const { test: name, options, files, skipOnWin32 } of tests) { for (const { test, options, files } of tests) {
if (process.platform === 'win32' && skipOnWin32) { it(test, async () => {
test.skip(name);
continue;
}
it(name, async () => {
// The file contents is the same as the path. // The file contents is the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file]))); mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));
@@ -304,38 +296,3 @@ describe('crawl', () => {
} }
}); });
}); });
describe('Batcher', () => {
let batcher: Batcher;
let onBatch: Mock;
beforeEach(() => {
onBatch = vi.fn();
batcher = new Batcher({ batchSize: 2, onBatch });
});
it('should trigger onBatch() when a batch limit is reached', async () => {
batcher.add('a');
batcher.add('b');
batcher.add('c');
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a', 'b']);
});
it('should trigger onBatch() when flush() is called', async () => {
batcher.add('a');
batcher.flush();
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
});
it('should trigger onBatch() when debounce time reached', async () => {
vi.useFakeTimers();
batcher = new Batcher({ batchSize: 2, debounceTimeMs: 100, onBatch });
batcher.add('a');
expect(onBatch).not.toHaveBeenCalled();
vi.advanceTimersByTime(200);
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
vi.useRealTimers();
});
});

View File

@@ -141,21 +141,25 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
} }
} }
if (patterns.length === 0) { let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
return crawledFiles; return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
} }
const searchPatterns = patterns.map((pattern) => { if (recursive) {
let escapedPattern = pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]'); searchPattern = searchPattern + '/**/';
if (recursive) { }
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
const globbedFiles = await glob(searchPatterns, { searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
absolute: true, absolute: true,
caseSensitiveMatch: false, caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden, dot: includeHidden,
ignore: [`**/${exclusionPattern}`], ignore: [`**/${exclusionPattern}`],
}); });
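The escaping step above deserves a note: glob syntax treats a single-character class like `[']` as a literal match, so wrapping quote characters this way keeps paths such as `/photo's` crawlable, and appending `/**` per pattern (rather than once to a merged `{a,b}` pattern) keeps recursion correct for each root. In isolation:

```ts
// Quote characters wrapped in character classes are matched literally by glob.
const escapeQuotes = (pattern: string) =>
  pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');

console.log(escapeQuotes("/photo's")); // /photo[']s
console.log(escapeQuotes('/photo"s') + '/**' + '/*.{jpg,png}'); // /photo["]s/**/*.{jpg,png}
```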
@@ -172,64 +176,3 @@ export const sha1 = (filepath: string) => {
rs.on('end', () => resolve(hash.digest('hex'))); rs.on('end', () => resolve(hash.digest('hex')));
}); });
}; };
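Only the tail of `sha1` is visible in this hunk; reconstructed from it, the helper is a streaming hash, so large media files are never loaded fully into memory (the body here is inferred from the visible lines, not copied verbatim):

```ts
import { createHash } from 'node:crypto';
import { createReadStream } from 'node:fs';

// Streaming SHA-1: hash the file chunk by chunk.
export const sha1 = (filepath: string): Promise<string> =>
  new Promise((resolve, reject) => {
    const hash = createHash('sha1');
    const rs = createReadStream(filepath);
    rs.on('error', reject);
    rs.on('data', (chunk) => hash.update(chunk));
    rs.on('end', () => resolve(hash.digest('hex')));
  });
```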
/**
* Batches items and calls onBatch to process them
* when the batch size is reached or the debounce time has passed.
*/
export class Batcher<T = unknown> {
private items: T[] = [];
private readonly batchSize: number;
private readonly debounceTimeMs?: number;
private readonly onBatch: (items: T[]) => void;
private debounceTimer?: NodeJS.Timeout;
constructor({
batchSize,
debounceTimeMs,
onBatch,
}: {
batchSize: number;
debounceTimeMs?: number;
onBatch: (items: T[]) => Promise<void>;
}) {
this.batchSize = batchSize;
this.debounceTimeMs = debounceTimeMs;
this.onBatch = onBatch;
}
private setDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
}
if (this.debounceTimeMs) {
this.debounceTimer = setTimeout(() => this.flush(), this.debounceTimeMs);
}
}
private clearDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
this.debounceTimer = undefined;
}
}
add(item: T) {
this.items.push(item);
this.setDebounceTimer();
if (this.items.length >= this.batchSize) {
this.flush();
}
}
flush() {
this.clearDebounceTimer();
if (this.items.length === 0) {
return;
}
this.onBatch(this.items);
this.items = [];
}
}
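A short usage sketch for the `Batcher` above (assuming the class is in scope; batch size and debounce chosen for illustration), matching how the watcher feeds paths into `uploadBatch`:

```ts
// Flush after 2 items, or after 100ms without new ones.
const batcher = new Batcher<string>({
  batchSize: 2,
  debounceTimeMs: 100,
  onBatch: async (paths) => console.log('batch:', paths),
});
batcher.add('a.jpg');
batcher.add('b.jpg'); // size limit hit -> onBatch(['a.jpg', 'b.jpg'])
batcher.add('c.jpg'); // flushed ~100ms later by the debounce timer
```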

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" { provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.0" version = "4.43.0"
constraints = "4.52.0" constraints = "4.43.0"
hashes = [ hashes = [
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=", "h1:2kDVLD36BOVgBzI9p0WIQ+xjFfMmjaItA0l8SyZWEPo=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=", "h1:2sGJDAwFEgO8+3y+2suYO+yrjNOzSsihad0hbM3+jPg=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=", "h1:A1WPQFcdD+7FrFBFrKcx4CiSr75xSmsO93C0e5NBAeQ=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=", "h1:BuXs/1ohmF4fWyOErY6vNbm7DaEIfbLSepSiZ2ol9I8=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=", "h1:QPh+X19oyo808sqdeJaVqahZcQgcG1jCi3DA5zpjz6U=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=", "h1:RI7c7dhSJoIkfou5b8ITRpM5MqsQD3FULj1h/rI4rJk=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=", "h1:gdI5JTCPjewdGq1bhGAs+V5qCcmJ73N2gtMfuFybJp4=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=", "h1:h4lnJpCIYZ7dsN9IO2mmwNdWNiQYEPoAEUjLF2sZ5kc=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=", "h1:jTaExrX/eR7vGT5wayGqH8ZtXS2zyk0WmD3zbAKFIQU=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=", "h1:l5NKJUOQJ1mHl1eekeXaxUZ+g+8Yv4aGcIN9vuK6GL4=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=", "h1:sNbvm66/2vc8B/khyioOO8eNaU8nb89x693AN7fQheU=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=", "h1:tXS4g1yE420AU4mvZ7RrYI+yYTutkRID3l+W0gBH4BM=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=", "h1:vA+kES7uqmKA9K0U45IXR94jaTQZCHZLCHqMUeGxKMI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=", "h1:zV131k79+ob9p4jrLDgztDNvZvt8fvrrzpn0nPikBw8=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0", "zh:006d111d6eafe6eeb5df2f91bd0ca320f979bd71f8cd8c475f10b2bd94acba55",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8", "zh:031fbb5cac23a841dc18e270cbfcd3ce9f4ba504edbd3c78931f7ed9827220a8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238", "zh:07a72fe8b55afee99529bf4169ab6abfac5eabcd10968c29101925bcd358b09f",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f", "zh:0d14727d011c2d9df4c3058f527d2409223449ab48b46cbc86922eb553ef77c1",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f", "zh:155ce1333672d26cd18a5866b0761489d91682beffee58e45c3a1b68e8491d3d",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8", "zh:35a2a1939a965335b29ebdbfd759d93a97c0f589d9cd218f537dee6f600e3fb9",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9", "zh:52912fe421e7d911431f77788db2ea13836efd65a2e82385adb52c6a84d4ee90",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060", "zh:57374318d9194ea1db08884b0541a9055823d5970ad48f9a57547ac231163007",
"zh:5fb942b9e2553c058fe09fe12fb39dd175cd6715bb41c059c1a70df2bfc64dc1",
"zh:63cabd2bda201b09b35a3279d1f813ab71394b9b90fc5cf8962a5eba207803bc",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6", "zh:978ee67d3d53970a5c474ab40b00adee97f4153b16804a2b6b7ee205ae69d18a",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb", "zh:bbafdbef631b5c80570087817b42b16b1a76d556d692853a71c47fb48663cf00",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b", "zh:be91b3f2a697cbbb41f65aad2600972d0ede1e962a7d8a00bb3177cb77d86666",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380", "zh:efe168ad4aaa6156ce5a31d4e50e9d54d38ee5a5888412f9e690c0de5d619683",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
] ]
} }

View File

@@ -5,7 +5,7 @@ terraform {
required_providers { required_providers {
cloudflare = { cloudflare = {
source = "cloudflare/cloudflare" source = "cloudflare/cloudflare"
version = "4.52.0" version = "4.43.0"
} }
} }
} }

View File

@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" { provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.0" version = "4.43.0"
constraints = "4.52.0" constraints = "4.43.0"
hashes = [ hashes = [
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=", "h1:2kDVLD36BOVgBzI9p0WIQ+xjFfMmjaItA0l8SyZWEPo=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=", "h1:2sGJDAwFEgO8+3y+2suYO+yrjNOzSsihad0hbM3+jPg=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=", "h1:A1WPQFcdD+7FrFBFrKcx4CiSr75xSmsO93C0e5NBAeQ=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=", "h1:BuXs/1ohmF4fWyOErY6vNbm7DaEIfbLSepSiZ2ol9I8=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=", "h1:QPh+X19oyo808sqdeJaVqahZcQgcG1jCi3DA5zpjz6U=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=", "h1:RI7c7dhSJoIkfou5b8ITRpM5MqsQD3FULj1h/rI4rJk=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=", "h1:gdI5JTCPjewdGq1bhGAs+V5qCcmJ73N2gtMfuFybJp4=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=", "h1:h4lnJpCIYZ7dsN9IO2mmwNdWNiQYEPoAEUjLF2sZ5kc=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=", "h1:jTaExrX/eR7vGT5wayGqH8ZtXS2zyk0WmD3zbAKFIQU=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=", "h1:l5NKJUOQJ1mHl1eekeXaxUZ+g+8Yv4aGcIN9vuK6GL4=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=", "h1:sNbvm66/2vc8B/khyioOO8eNaU8nb89x693AN7fQheU=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=", "h1:tXS4g1yE420AU4mvZ7RrYI+yYTutkRID3l+W0gBH4BM=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=", "h1:vA+kES7uqmKA9K0U45IXR94jaTQZCHZLCHqMUeGxKMI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=", "h1:zV131k79+ob9p4jrLDgztDNvZvt8fvrrzpn0nPikBw8=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0", "zh:006d111d6eafe6eeb5df2f91bd0ca320f979bd71f8cd8c475f10b2bd94acba55",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8", "zh:031fbb5cac23a841dc18e270cbfcd3ce9f4ba504edbd3c78931f7ed9827220a8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238", "zh:07a72fe8b55afee99529bf4169ab6abfac5eabcd10968c29101925bcd358b09f",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f", "zh:0d14727d011c2d9df4c3058f527d2409223449ab48b46cbc86922eb553ef77c1",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f", "zh:155ce1333672d26cd18a5866b0761489d91682beffee58e45c3a1b68e8491d3d",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8", "zh:35a2a1939a965335b29ebdbfd759d93a97c0f589d9cd218f537dee6f600e3fb9",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9", "zh:52912fe421e7d911431f77788db2ea13836efd65a2e82385adb52c6a84d4ee90",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060", "zh:57374318d9194ea1db08884b0541a9055823d5970ad48f9a57547ac231163007",
"zh:5fb942b9e2553c058fe09fe12fb39dd175cd6715bb41c059c1a70df2bfc64dc1",
"zh:63cabd2bda201b09b35a3279d1f813ab71394b9b90fc5cf8962a5eba207803bc",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6", "zh:978ee67d3d53970a5c474ab40b00adee97f4153b16804a2b6b7ee205ae69d18a",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb", "zh:bbafdbef631b5c80570087817b42b16b1a76d556d692853a71c47fb48663cf00",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b", "zh:be91b3f2a697cbbb41f65aad2600972d0ede1e962a7d8a00bb3177cb77d86666",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380", "zh:efe168ad4aaa6156ce5a31d4e50e9d54d38ee5a5888412f9e690c0de5d619683",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
] ]
} }

View File

@@ -5,7 +5,7 @@ terraform {
required_providers { required_providers {
cloudflare = { cloudflare = {
source = "cloudflare/cloudflare" source = "cloudflare/cloudflare"
version = "4.52.0" version = "4.43.0"
} }
} }
} }

View File

@@ -1,13 +1,4 @@
# # See:
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
# For development see:
# - https://immich.app/docs/developer/setup # - https://immich.app/docs/developer/setup
# - https://immich.app/docs/developer/troubleshooting # - https://immich.app/docs/developer/troubleshooting
@@ -25,7 +16,7 @@ services:
context: ../ context: ../
dockerfile: server/Dockerfile dockerfile: server/Dockerfile
target: dev target: dev
restart: unless-stopped restart: always
volumes: volumes:
- ../server:/usr/src/app - ../server:/usr/src/app
- ../open-api:/usr/src/open-api - ../open-api:/usr/src/open-api
@@ -54,9 +45,9 @@ services:
soft: 1048576 soft: 1048576
hard: 1048576 hard: 1048576
ports: ports:
- 3001:3001
- 9230:9230 - 9230:9230
- 9231:9231 - 9231:9231
- 2283:2283
depends_on: depends_on:
- redis - redis
- database - database
@@ -66,21 +57,17 @@ services:
immich-web: immich-web:
container_name: immich_web container_name: immich_web
image: immich-web-dev:latest image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build: build:
context: ../web context: ../web
command: ['/usr/src/app/bin/immich-web'] command: ['/usr/src/app/bin/immich-web']
env_file: env_file:
- .env - .env
ports: ports:
- 3000:3000 - 2283:3000
- 24678:24678 - 24678:24678
volumes: volumes:
- ../web:/usr/src/app - ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/ - ../open-api/:/usr/src/open-api/
# - ../../ui:/usr/ui
- /usr/src/app/node_modules - /usr/src/app/node_modules
ulimits: ulimits:
nofile: nofile:
@@ -95,12 +82,12 @@ services:
image: immich-machine-learning-dev:latest image: immich-machine-learning-dev:latest
# extends: # extends:
# file: hwaccel.ml.yml # file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference # service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
build: build:
context: ../machine-learning context: ../machine-learning
dockerfile: Dockerfile dockerfile: Dockerfile
args: args:
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports: ports:
- 3003:3003 - 3003:3003
volumes: volumes:
@@ -116,13 +103,13 @@ services:
redis: redis:
container_name: immich_redis container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8 image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck: healthcheck:
test: redis-cli ping || exit 1 test: redis-cli ping || exit 1
database: database:
container_name: immich_postgres container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52 image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file: env_file:
- .env - .env
environment: environment:
@@ -135,25 +122,28 @@ services:
ports: ports:
- 5432:5432 - 5432:5432
healthcheck: healthcheck:
test: >- test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m interval: 5m
start_interval: 30s start_interval: 30s
start_period: 5m start_period: 5m
command: >- command:
postgres [
-c shared_preload_libraries=vectors.so 'postgres',
-c 'search_path="$$user", public, vectors' '-c',
-c logging_collector=on 'shared_preload_libraries=vectors.so',
-c max_wal_size=2GB '-c',
-c shared_buffers=512MB 'search_path="$$user", public, vectors',
-c wal_compression=on '-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics # set IMMICH_METRICS=true in .env to enable metrics
# immich-prometheus: # immich-prometheus:
# container_name: immich_prometheus # container_name: immich_prometheus
# ports: # ports:

View File

@@ -1,12 +1,3 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich-prod name: immich-prod
services: services:
@@ -25,7 +16,7 @@ services:
env_file: env_file:
- .env - .env
ports: ports:
- 2283:2283 - 2283:3001
depends_on: depends_on:
- redis - redis
- database - database
@@ -38,12 +29,12 @@ services:
image: immich-machine-learning:latest image: immich-machine-learning:latest
# extends: # extends:
# file: hwaccel.ml.yml # file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference # service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
build: build:
context: ../machine-learning context: ../machine-learning
dockerfile: Dockerfile dockerfile: Dockerfile
args: args:
- DEVICE=cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - DEVICE=cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference
ports: ports:
- 3003:3003 - 3003:3003
volumes: volumes:
@@ -56,14 +47,14 @@ services:
redis: redis:
container_name: immich_redis container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8 image: redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck: healthcheck:
test: redis-cli ping || exit 1 test: redis-cli ping || exit 1
restart: always restart: always
database: database:
container_name: immich_postgres container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52 image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file: env_file:
- .env - .env
environment: environment:
@@ -76,21 +67,19 @@ services:
ports: ports:
- 5432:5432 - 5432:5432
healthcheck: healthcheck:
test: >- test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m interval: 5m
start_interval: 30s start_interval: 30s
start_period: 5m start_period: 5m
command: >- command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always restart: always
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics # set IMMICH_METRICS=true in .env to enable metrics
immich-prometheus: immich-prometheus:
container_name: immich_prometheus container_name: immich_prometheus
ports: ports:
- 9090:9090 - 9090:9090
image: prom/prometheus@sha256:502ad90314c7485892ce696cb14a99fceab9fc27af29f4b427f41bd39701a199 image: prom/prometheus@sha256:f6639335d34a77d9d9db382b92eeb7fc00934be8eae81dbc03b31cfe90411a94
volumes: volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml - ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus - prometheus-data:/prometheus
@@ -99,10 +88,10 @@ services:
# add data source for http://immich-prometheus:9090 to get started # add data source for http://immich-prometheus:9090 to get started
immich-grafana: immich-grafana:
container_name: immich_grafana container_name: immich_grafana
command: [ './run.sh', '-disable-reporting' ] command: ['./run.sh', '-disable-reporting']
ports: ports:
- 3000:3000 - 3000:3000
image: grafana/grafana:11.5.2-ubuntu@sha256:8b5858c447e06fd7a89006b562ba7bba7c4d5813600c7982374c41852adefaeb image: grafana/grafana:11.2.1-ubuntu@sha256:b90c0fdc482913de7a55fe96539bf9e3c4fbcee835d0c2dffc59152bc3964ff7
volumes: volumes:
- grafana-data:/var/lib/grafana - grafana-data:/var/lib/grafana

View File

@@ -1,11 +1,10 @@
# #
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose # WARNING: Make sure to use the docker-compose.yml of the current release:
#
# Make sure to use the docker-compose.yml of the current release:
# #
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml # https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
# #
# The compose file on main may not be compatible with the latest release. # The compose file on main may not be compatible with the latest release.
#
name: immich name: immich
@@ -23,7 +22,7 @@ services:
env_file: env_file:
- .env - .env
ports: ports:
- '2283:2283' - 2283:3001
depends_on: depends_on:
- redis - redis
- database - database
@@ -33,12 +32,12 @@ services:
immich-machine-learning: immich-machine-learning:
container_name: immich_machine_learning container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag. # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda # Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release} image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml # file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable # service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes: volumes:
- model-cache:/cache - model-cache:/cache
env_file: env_file:
@@ -49,14 +48,14 @@ services:
redis: redis:
container_name: immich_redis container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8 image: docker.io/redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck: healthcheck:
test: redis-cli ping || exit 1 test: redis-cli ping || exit 1
restart: always restart: always
database: database:
container_name: immich_postgres container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52 image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment: environment:
POSTGRES_PASSWORD: ${DB_PASSWORD} POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME} POSTGRES_USER: ${DB_USERNAME}
@@ -66,13 +65,11 @@ services:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck: healthcheck:
test: >- test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1; Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m interval: 5m
start_interval: 30s start_interval: 30s
start_period: 5m start_period: 5m
command: >- command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
postgres -c shared_preload_libraries=vectors.so -c 'search_path="$$user", public, vectors' -c logging_collector=on -c max_wal_size=2GB -c shared_buffers=512MB -c wal_compression=on
restart: always restart: always
volumes: volumes:

View File

@@ -13,13 +13,6 @@ services:
volumes: volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver) - /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required) - /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)
rknn:
security_opt:
- systempaths=unconfined
- apparmor=unconfined
devices:
- /dev/dri:/dev/dri
cpu: {} cpu: {}
@@ -33,13 +26,6 @@ services:
capabilities: capabilities:
- gpu - gpu
rocm:
group_add:
- video
devices:
- /dev/dri:/dev/dri
- /dev/kfd:/dev/kfd
openvino: openvino:
device_cgroup_rules: device_cgroup_rules:
- 'c 189:* rmw' - 'c 189:* rmw'

View File

@@ -48,7 +48,6 @@ services:
vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2 vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2
devices: devices:
- /dev/dri:/dev/dri - /dev/dri:/dev/dri
- /dev/dxg:/dev/dxg
volumes: volumes:
- /usr/lib/wsl:/usr/lib/wsl - /usr/lib/wsl:/usr/lib/wsl
environment: environment:

View File

@@ -1 +1 @@
22.14.0 20.17.0

View File

@@ -49,22 +49,13 @@ The behaviors differ based on your device manufacturer and operating system, but
On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below: On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below:
- Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`. - Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`.
- Disable **Low Power Mode** when not needed, as this can prevent apps from running in the background.
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich. - Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often. - Use the Immich app more often.
### Why are features in the mobile app not working with a self-signed certificate, Basic Auth, custom headers, or mutual TLS? ### Why are features not working with a self-signed cert or mTLS?
These network features are experimental. They often do not work with video playback, asset upload or download, and other features. Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
Many of these limitations are tracked in [#15230](https://github.com/immich-app/immich/issues/15230). We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
Instead of these experimental features, we recommend using the URL switching feature, a VPN, or a [free trusted SSL certificate](https://letsencrypt.org/) for your domain.
We are not actively developing these features and will not be able to provide support, but welcome contributions to improve them.
Please discuss any large PRs with our dev team to ensure your time is not wasted.
### Why isn't the mobile app updated yet?
The app stores can take a few days to approve new builds of the app. If you're impatient, android APKs can be downloaded from the GitHub releases.
--- ---
@@ -78,8 +69,7 @@ However, Immich will delete original files that have been trashed when the trash
### Why do my file names appear as a random string in the file manager? ### Why do my file names appear as a random string in the file manager?
When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names. When Storage Template is off (default) Immich saves the file names in a random string (also known as random UUIDs) to prevent duplicate file names. To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
To retrieve the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation. It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
### Can I add my existing photo library? ### Can I add my existing photo library?
@@ -92,20 +82,11 @@ Template changes will only apply to _new_ assets. To retroactively apply the tem
### Why are only photos and not videos being uploaded to Immich? ### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy in front of Immich. This often happens when using a reverse proxy (such as Nginx or Cloudflare tunnel) in front of Immich. Make sure to set your reverse proxy to allow large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also, check the disk space of your reverse proxy. In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to allow large requests.
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, please know that they set a maximum filesize of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
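For nginx, a minimal sketch of the relevant settings might look like the following (the upstream address is a placeholder, and `50000M` is only an illustrative value; size it to your largest expected upload):

```nginx
server {
    # Allow large POST bodies; the value here is only an example
    client_max_body_size 50000M;

    location / {
        # Stream uploads to Immich instead of buffering them to disk first
        proxy_request_buffering off;
        proxy_pass http://<immich-server-ip>:2283;
    }
}
```

Other reverse proxies have equivalent settings; consult the reverse proxy documentation linked above for tested configurations.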
### Why are some photos stored in the file system with the wrong date? ### Why are some photos stored in the file system with the wrong date?
There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. There are a few different scenarios that can lead to this situation. The solution is to rerun the storage migration job. The job is only automatically run once per asset after upload. If metadata extraction originally failed, the jobs were cleared/canceled, etc., the job may not have run automatically the first time.
The job is only automatically run once per asset after upload. If metadata extraction originally failed, or the jobs were cleared/canceled, etc.,
the job may not have run automatically the first time.
### How can I hide photos from the timeline? ### How can I hide photos from the timeline?
@@ -117,7 +98,7 @@ See [Backup and Restore](/docs/administration/backup-and-restore.md).
### Does Immich support reading existing face tag metadata? ### Does Immich support reading existing face tag metadata?
Yes, it creates new faces and persons from the imported asset metadata. For details see the [feature request #4348](https://github.com/immich-app/immich/discussions/4348) and [PR #6455](https://github.com/immich-app/immich/pull/6455). No, it currently does not. There is an [open feature request on GitHub](https://github.com/immich-app/immich/discussions/4348).
### Does Immich support the filtering of NSFW images? ### Does Immich support the filtering of NSFW images?
@@ -135,8 +116,7 @@ Also, there are additional jobs for person (face) thumbnails.
### Why do files from WhatsApp not appear with the correct date? ### Why do files from WhatsApp not appear with the correct date?
Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, Files sent on WhatsApp are saved without metadata on the file. Therefore, Immich has no way of knowing the original date of the file when files are uploaded from WhatsApp, not the order of arrival on the device. [See #3527](https://github.com/immich-app/immich/issues/3527).
not the order of arrival on the device. [See #9116](https://github.com/immich-app/immich/discussions/9116).
### What happens if an asset exists in more than one account? ### What happens if an asset exists in more than one account?
@@ -164,35 +144,6 @@ For example, say you have existing transcodes with the policy "Videos higher tha
No. Our design principle is that the original assets should always be untouched. No. Our design principle is that the original assets should always be untouched.
### How can I mount a CIFS/Samba volume within Docker?
If you aren't able to or prefer not to mount Samba on the host (such as Windows environment), you can mount the volume within Docker.
Below is an example in the `docker-compose.yml`.
Change your username, password, local IP, and share name, and see below where the line `- originals:/usr/src/app/originals`,
correlates to the section where the volume `originals` was created. You can call this whatever you like, and map it to the docker container as you like.
For example you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `Photos:/usr/src/app/photos`.
```diff
...
services:
immich-server:
...
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
+ - originals:/usr/src/app/originals
...
volumes:
model-cache:
+ originals:
+ driver_opts:
+ type: cifs
+ o: 'iocharset=utf8,username=USERNAMEHERE,password=PASSWORDHERE,rw' # change to `ro` if read only desired
+ device: '//localipaddress/sharename'
```
--- ---
## Albums ## Albums
@@ -240,7 +191,7 @@ Immich uses CLIP models. An ML model converts each image to an "embedding", whic
### How does facial recognition work? ### How does facial recognition work?
See [How Facial Recognition Works](/docs/features/facial-recognition#how-facial-recognition-works) for details. See [How Facial Recognition Works](/docs/features/facial-recognition#How-Facial-Recognition-Works) for details.
### How can I disable machine learning? ### How can I disable machine learning?
@@ -262,7 +213,7 @@ No, this is not supported. Only models listed in the [Hugging Face][huggingface]
### I want to be able to search in other languages besides English. How can I do that? ### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual CLIP model. See [here](/docs/features/searching#clip-models) for instructions. You can change to a multilingual CLIP model. See [here](/docs/features/smart-search#CLIP-model) for instructions.
### Does Immich support Facial Recognition for videos? ### Does Immich support Facial Recognition for videos?
@@ -273,7 +224,7 @@ Scanning the entire video for faces may be implemented in the future.
No. No.
:::tip :::tip
You can use [Smart Search](/docs/features/searching.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results. You can use [Smart Search](/docs/features/smart-search.md) for this to some extent. For example, if you have a Golden Retriever and a Chihuahua, type these words in the smart search and watch the results.
::: :::
### I'm getting a lot of "faces" that aren't faces, what can I do? ### I'm getting a lot of "faces" that aren't faces, what can I do?
@@ -315,7 +266,7 @@ The initial backup is the most intensive due to the number of jobs running. The
- For facial recognition on new images to work properly, You must re-run the Face Detection job for all images after this. - For facial recognition on new images to work properly, You must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further. - At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
- It's recommended to only apply these constraints _after_ taking some of the measures here for best performance. - It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [above](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning. - If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage? ### Can I limit CPU and RAM usage?
@@ -357,7 +308,7 @@ Do not exaggerate with the job concurrency because you're probably thoroughly ov
### My server shows Server Status Offline | Version Unknown. What can I do? ### My server shows Server Status Offline | Version Unknown. What can I do?
You need to [enable WebSockets](/docs/administration/reverse-proxy/) on your reverse proxy. You need to enable WebSockets on your reverse proxy.
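For nginx, this typically means forwarding the WebSocket upgrade headers; a minimal sketch (the upstream address is a placeholder):

```nginx
location / {
    proxy_pass http://<immich-server-ip>:2283;
    # Forward the WebSocket upgrade handshake
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
}
```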
--- ---
@@ -388,7 +339,7 @@ The non-root user/group needs read/write access to the volume mounts, including
The Docker Compose top level volume element does not support non-root access, all of the above volumes must be local volume mounts. The Docker Compose top level volume element does not support non-root access, all of the above volumes must be local volume mounts.
::: :::
For a further hardened system, you can add the following block to every container. For a further hardened system, you can add the following block to every container except for `immich_postgres`.
<details> <details>
<summary>docker-compose.yml</summary> <summary>docker-compose.yml</summary>
@@ -437,28 +388,29 @@ If the error says the worker is exiting, then this is normal. This is a feature
There are a few reasons why this can happen. There are a few reasons why this can happen.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory. If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory. Consider either increasing the server's RAM or moving the service to a server with more RAM.
Consider either increasing the server's RAM or moving the service to a server with more RAM.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible with Immich. If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible. This is unlikely to occur on version 1.92.0 or later. Consider upgrading if your version of Immich is below that.
If your version of Immich is below 1.92.0 and the crash occurs after logs about tracing or exporting a model, consider either upgrading or disabling the Tag Objects job.
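To tell the two cases apart, you can inspect the exited container; a quick sketch, assuming the machine learning container name from the default compose file:

```bash
# 137 usually indicates SIGKILL (often the OOM killer); 132 indicates SIGILL
docker inspect immich_machine_learning \
  --format 'exit code: {{.State.ExitCode}}, OOM killed: {{.State.OOMKilled}}'

# Check the last log lines around the crash for SIGKILL/SIGILL messages
docker logs --tail 50 immich_machine_learning
```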
## Database ## Database
### Why am I getting database ownership errors? ### Why am I getting database ownership errors?
If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem. If you get database errors such as `FATAL: data directory "/var/lib/postgresql/data" has wrong ownership` upon database startup, this is likely due to an issue with your filesystem.
NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/requirements#special-requirements-for-windows-users) for more details. NTFS and ex/FAT/32 filesystems are not supported. See [here](/docs/install/environment-variables#supported-filesystems) for more details.
### How can I verify the integrity of my database? ### How can I verify the integrity of my database?
Database checksums are enabled by default for new installations since v1.104.0. You can check if they are enabled by running the following command. If you installed Immich using v1.104.0 or later, you likely have database checksums enabled by default. You can check this by running the following command.
A result of `on` means that checksums are enabled. A result of `on` means that checksums are enabled.
<details> <details>
<summary>Check if checksums are enabled</summary> <summary>Check if checksums are enabled</summary>
```bash ```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="show data_checksums" docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="show data_checksums"
data_checksums data_checksums
---------------- ----------------
on on
@@ -467,13 +419,13 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details> </details>
If checksums are enabled, you can check the status of the database with the following command. A normal result is all `0`s. If checksums are enabled, you can check the status of the database with the following command. A normal result is all zeroes.
<details> <details>
<summary>Check for database corruption</summary> <summary>Check for database corruption</summary>
```bash ```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL" docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
datname | checksum_failures | checksum_last_failure datname | checksum_failures | checksum_last_failure
-----------+-------------------+----------------------- -----------+-------------------+-----------------------
postgres | 0 | postgres | 0 |
@@ -485,24 +437,6 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details> </details>
You can also scan the Postgres database file structure for errors:
<details>
<summary>Scan for file structure errors</summary>
```bash
docker exec -it immich_postgres pg_amcheck --username=postgres --heapallindexed --parent-check --rootdescend --progress --all --install-missing
```
A normal result will end something like this and return with an exit code of `0`:
```bash
7470/8832 relations (84%), 730829/734735 pages (99%)
8425/8832 relations (95%), 734367/734735 pages (99%)
8832/8832 relations (100%), 734735/734735 pages (100%)
```
</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database. If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed. To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected. After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected.
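As a sketch of that salvage step, `zero_damaged_pages` can be passed per connection through libpq's `PGOPTIONS` so the rest of the server keeps its defaults. The paths and container name follow the examples above; verify this against your own setup before relying on it:

```bash
# Dump a damaged database, zeroing unreadable pages for these sessions only.
# WARNING: zero_damaged_pages silently discards data in damaged pages;
# use it only to salvage what remains.
docker exec -t -e PGOPTIONS='-c zero_damaged_pages=on' \
  immich_postgres pg_dumpall --clean --if-exists --username=postgres \
  | gzip > "/path/to/backup/salvage-dump.sql.gz"
```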

View File

@@ -5,10 +5,6 @@ import TabItem from '@theme/TabItem';
A [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) is recommended to protect your data. You should keep copies of your uploaded photos/videos as well as the Immich database for a comprehensive backup solution. This page provides an overview on how to backup the database and the location of user-uploaded pictures and videos. A template bash script that can be run as a cron job is provided [here](/docs/guides/template-backup-script.md) A [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) is recommended to protect your data. You should keep copies of your uploaded photos/videos as well as the Immich database for a comprehensive backup solution. This page provides an overview on how to backup the database and the location of user-uploaded pictures and videos. A template bash script that can be run as a cron job is provided [here](/docs/guides/template-backup-script.md)
:::danger
The instructions on this page show you how to prepare your Immich instance to be backed up, and which files to take a backup of. You still need to take care of using an actual backup tool to make a backup yourself.
:::
## Database ## Database
:::caution :::caution
@@ -19,29 +15,12 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database. Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
::: :::
The recommended way to backup and restore the Immich database is to use the `pg_dumpall` command. When restoring, you need to delete the `DB_DATA_LOCATION` folder (if it exists) to reset the database.
:::caution :::caution
It is not recommended to directly backup the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored. It is not recommended to directly backup the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
::: :::
### Automatic Database Backups
For convenience, Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
As mentioned above, you should make your own backup of these together with the asset folders as noted below.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
#### Trigger Backup
You are able to trigger a backup in the [admin job status page](http://my.immich.app/admin/jobs-status).
Visit the page, open the "Create job" modal from the top right, select "Backup Database" and click "Confirm".
A job will run and trigger a backup, you can verify this worked correctly by checking the logs or the backup folder.
This backup will count towards the last X backups that will be kept based on your settings.
#### Restoring
We hope to make restoring simpler in future versions, for now you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then please follow the steps in the following section for restoring the database.
### Manual Backup and Restore ### Manual Backup and Restore
<Tabs> <Tabs>
@@ -55,59 +34,94 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database ## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch # rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired) docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default gunzip < "/path/to/backup/dump.sql.gz" \
gunzip --stdout "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \ | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps docker compose up -d # Start remainder of Immich apps
``` ```
</TabItem> </TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)"> <TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup' ```powershell title='Backup'
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres)) docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | Set-Content -Encoding utf8 "C:\path\to\backup\dump.sql"
``` ```
```powershell title='Restore' ```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database ## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch # Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: `- 'C:\path\to\backup\dump.sql:/dump.sql'`) into the immich_postgres container using the docker-compose.yml docker compose pull # Update to latest version of Immich (if desired)
docker compose pull # Update to latest version of Immich (if desired) docker compose create # Create Docker containers for Immich apps without running them
docker compose create # Create Docker containers for Immich apps without running them docker start immich_postgres # Start Postgres server
docker start immich_postgres # Start Postgres server sleep 10 # Wait for Postgres server to start up
sleep 10 # Wait for Postgres server to start up gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command docker compose up -d # Start remainder of Immich apps
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip --stdout`
cat "/dump.sql" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | psql --dbname=postgres --username=<DB_USERNAME>
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
``` ```
</TabItem> </TabItem>
</Tabs> </Tabs>
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database. Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).
:::tip :::tip
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored. Some deployment methods make it difficult to start the database without also starting the server or microservices. In these cases, you may set the environmental variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Note that both the server and microservices must have this variable set to prevent the migrations from running. Be sure to remove this variable and restart the services after the database is restored.
:::
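Building on the tip above, with the default compose files (which load `.env` via `env_file`) that could look like this sketch:

```bash
# Temporarily prevent the server from running migrations during a restore
echo 'DB_SKIP_MIGRATIONS=true' >> .env
docker compose up -d

# ... restore the database as shown above ...

# Remove the variable again (GNU sed syntax) and recreate the server
sed -i '/^DB_SKIP_MIGRATIONS=true$/d' .env
docker compose up -d --force-recreate immich-server
```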
### Automatic Database Backups
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
services:
...
backup:
container_name: immich_db_dumper
image: prodrigestivill/postgres-backup-local:14
restart: always
env_file:
- .env
environment:
POSTGRES_HOST: database
POSTGRES_CLUSTER: 'TRUE'
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: ${DB_DATABASE_NAME}
SCHEDULE: "@daily"
POSTGRES_EXTRA_OPTS: '--clean --if-exists'
BACKUP_DIR: /db_dumps
volumes:
- ./db_dumps:/db_dumps
depends_on:
- database
```
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
```
:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.
Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
::: :::
## Filesystem ## Filesystem
Immich stores two types of content in the filesystem: (a) original, unmodified assets (photos and videos), and (b) generated content. We recommend backing up the entire contents of `UPLOAD_LOCATION`, but only the original content is critical, which is stored in the following folders: Immich stores two types of content in the filesystem: (1) original, unmodified assets (photos and videos), and (2) generated content. Only the original content needs to be backed-up, which is stored in the following folders:
1. `UPLOAD_LOCATION/library` 1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload` 2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile` 3. `UPLOAD_LOCATION/profile`
If you choose to back up only those folders, you will need to rerun the transcoding and thumbnail generation jobs for all assets after you restore from a backup.
:::caution :::caution
If you moved some of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup path to match your setup If you moved some of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup path to match your setup
::: :::
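As a concrete sketch, assuming `UPLOAD_LOCATION` is set in your shell and `/path/to/backup` is your backup target, the critical folders can be copied with `rsync`:

```bash
# Copy only the original content; thumbnails and transcodes can be regenerated
rsync -a "$UPLOAD_LOCATION"/{library,upload,profile} /path/to/backup/
```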
@@ -183,7 +197,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Stored in `UPLOAD_LOCATION/profile/<userID>`. - Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:** - **Thumbs Images:**
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces. - Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`. - Stored in `UPLOCAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:** - **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed. - Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`. - Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.

View File

@@ -18,10 +18,4 @@ You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events: Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.webp').default} width="80%" title="User notification settings" /> <img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />
## Notification templates
You can override the default notification text with custom templates in HTML format. You can use tags to show dynamic tags in your templates.
<img src={require('./img/user-notifications-templates.webp').default} width="80%" title="User notification templates" />

View File

@@ -22,7 +22,7 @@ Copy the entire `immich-server` block as a new service and make the following ch
- container_name: immich_server - container_name: immich_server
... ...
- ports: - ports:
- - 2283:2283 - - 2283:3001
+ immich-microservices: + immich-microservices:
+ container_name: immich_microservices + container_name: immich_microservices
``` ```
@@ -48,4 +48,8 @@ When a new asset is uploaded it kicks off a series of jobs, which include metada
Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed. Additionally, some jobs run on a schedule, which is every night at midnight. This schedule, with the exception of [External Libraries](/docs/features/libraries) scanning, cannot be changed.
:::info
Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::
<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" /> <img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />

View File

@@ -11,7 +11,7 @@ Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobil
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including: Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect) - [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/) - [Authelia](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/)
- [Okta](https://www.okta.com/openid-connect/) - [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect) - [Google](https://developers.google.com/identity/openid-connect/openid-connect)

Some files were not shown because too many files have changed in this diff.