Compare commits


9 Commits

Author | SHA1 | Message | Date
Daniel Dietzler | 4b422bd0f7 | disable metrics in dev env | 2023-12-31 19:50:10 +01:00
Daniel Dietzler | f33a662f48 | open api | 2023-12-28 19:49:39 +01:00
Daniel Dietzler | 0232655da2 | revert individual settings for metrics, now only enable/disable | 2023-12-28 19:49:32 +01:00
Daniel Dietzler | ac4c57247e | add icon indicating when metrics are being shared | 2023-12-28 19:29:21 +01:00
Daniel Dietzler | fb01bd956f | open api | 2023-12-28 19:06:45 +01:00
Daniel Dietzler | 902d4d0275 | settings for metrics | 2023-12-28 19:06:35 +01:00
Daniel Dietzler | db997f9173 | queue metrics job every 24 hours | 2023-12-23 22:09:51 +01:00
Daniel Dietzler | e9197cde67 | collect more metrics, move everything related to metrics repo | 2023-12-23 21:50:20 +01:00
Daniel Dietzler | 874f707c92 | initial sample implementation of metrics | 2023-12-23 21:50:18 +01:00
733 changed files with 53780 additions and 32512 deletions


@@ -8,7 +8,7 @@ machine-learning/
misc/
mobile/
server/node_modules/
server/node_modules
server/coverage/
server/.reverse-geocoding-dump/
server/upload/
@@ -19,10 +19,7 @@ web/coverage/
web/.svelte-kit
web/build/
cli/node_modules/
cli/node_modules
cli/.reverse-geocoding-dump/
cli/upload/
cli/dist/
open-api/typescript-sdk/node_modules/
open-api/typescript-sdk/build/
cli/dist/

13 .gitattributes vendored

@@ -8,9 +8,14 @@ mobile/openapi/.openapi-generator/FILES linguist-generated=true
mobile/lib/**/*.g.dart -diff -merge
mobile/lib/**/*.g.dart linguist-generated=true
open-api/typescript-sdk/client/**/*.md -diff -merge
open-api/typescript-sdk/client/**/*.md linguist-generated=true
open-api/typescript-sdk/client/**/*.ts -diff -merge
open-api/typescript-sdk/client/**/*.ts linguist-generated=true
cli/src/api/open-api/**/*.md -diff -merge
cli/src/api/open-api/**/*.md linguist-generated=true
cli/src/api/open-api/**/*.ts -diff -merge
cli/src/api/open-api/**/*.ts linguist-generated=true
web/src/api/open-api/**/*.md -diff -merge
web/src/api/open-api/**/*.md linguist-generated=true
web/src/api/open-api/**/*.ts -diff -merge
web/src/api/open-api/**/*.ts linguist-generated=true
*.sh text eol=lf


@@ -1,5 +1,7 @@
name: Report an issue with Immich
description: Report an issue with Immich
labels: ["bug", "need triage"]
title: "[BUG] <title>"
body:
- type: markdown
attributes:

29 .github/release.yml vendored

@@ -1,42 +1,29 @@
changelog:
categories:
- title: ⚠️ Breaking Changes
- title: Breaking Changes 🛠
labels:
- breaking-change
- title: 🗄️ Server
- title: Server
labels:
- 🗄server
- title: 📱 Mobile
- title: Mobile
labels:
- 📱mobile
- title: 🖥️ Web
- title: Web
labels:
- 🖥web
- title: 🧠 Machine Learning
- title: Machine Learning
labels:
- 🧠machine-learning
- title: ⚡ CLI
- title: CLI
labels:
- cli
- title: 📓 Documentation
- title: Documentation
labels:
- documentation
- title: 🔨 Build
- title: Dependency updates
labels:
- deployment
- title: 🤖 Dependencies
labels:
- dependencies
- renovate
- title: Other changes
labels:
- "*"


@@ -10,26 +10,9 @@ concurrency:
cancel-in-progress: true
jobs:
server-e2e-api:
name: Server (e2e-api)
runs-on: mich
defaults:
run:
working-directory: ./server
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Run npm install
run: npm ci
- name: Run e2e tests
run: npm run e2e:api
server-e2e-jobs:
name: Server (e2e-jobs)
runs-on: mich
e2e-tests:
name: Server (e2e)
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -38,7 +21,7 @@ jobs:
submodules: "recursive"
- name: Run e2e tests
run: make server-e2e-jobs
run: make test-server-e2e
doc-tests:
name: Docs
@@ -58,6 +41,10 @@ jobs:
run: npm run format
if: ${{ !cancelled() }}
- name: Run tsc
run: npm run check
if: ${{ !cancelled() }}
- name: Run build
run: npm run build
if: ${{ !cancelled() }}
@@ -103,14 +90,10 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
- name: Run npm install (cli)
- name: Run npm install in cli
run: npm ci
- name: Run npm install (server)
- name: Run npm install in server
run: npm ci
working-directory: ./server
@@ -143,14 +126,10 @@ jobs:
with:
submodules: "recursive"
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
- name: Run npm install (cli)
- name: Run npm install in cli
run: npm ci
- name: Run npm install (server)
- name: Run npm install in server
run: npm ci
working-directory: ./server
@@ -168,10 +147,6 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
- name: Run npm install
run: npm ci
@@ -191,9 +166,9 @@ jobs:
run: npm run check:typescript
if: ${{ !cancelled() }}
- name: Run unit tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
# - name: Run unit tests & coverage
# run: npm run test:cov
# if: ${{ !cancelled() }}
mobile-unit-tests:
name: Mobile
@@ -228,7 +203,7 @@ jobs:
poetry install --with dev
- name: Lint with ruff
run: |
poetry run ruff check --output-format=github app export
poetry run ruff check --format=github app export
- name: Check black formatting
run: |
poetry run black --check app export
@@ -245,14 +220,14 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Run API generation
run: make open-api
run: npm --prefix server run api:generate
- name: Find file changes
uses: tj-actions/verify-changed-files@v13.1
id: verify-changed-files
with:
files: |
mobile/openapi
open-api/typescript-sdk
web/src/api/open-api
- name: Verify files have not changed
if: steps.verify-changed-files.outputs.files_changed == 'true'
run: |
@@ -294,9 +269,6 @@ jobs:
- name: Run existing migrations
run: npm run typeorm:migrations:run
- name: Test npm run schema:reset command works
run: npm run typeorm:schema:reset
- name: Generate new migrations
continue-on-error: true
run: npm run typeorm:migrations:generate ./src/infra/migrations/TestMigration

4 .gitignore vendored

@@ -1,5 +1,3 @@
**/node_modules/**
.DS_Store
.vscode/*
!.vscode/launch.json
@@ -14,5 +12,3 @@ mobile/gradle.properties
mobile/openapi/pubspec.lock
mobile/*.jks
mobile/libisar.dylib
open-api/typescript-sdk/build


@@ -16,8 +16,8 @@ stage:
pull-stage:
docker compose -f ./docker/docker-compose.staging.yml pull
server-e2e-jobs:
docker compose -f ./server/e2e/docker-compose.server-e2e.yml up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server --remove-orphans --build
test-server-e2e:
docker compose -f ./server/test/docker-compose.server-e2e.yml up --renew-anon-volumes --abort-on-container-exit --exit-code-from immich-server --remove-orphans --build
prod:
docker compose -f ./docker/docker-compose.prod.yml up --build -V --remove-orphans
@@ -25,15 +25,8 @@ prod:
prod-scale:
docker compose -f ./docker/docker-compose.prod.yml up --build -V --scale immich-server=3 --scale immich-microservices=3 --remove-orphans
.PHONY: open-api
open-api:
cd ./open-api && bash ./bin/generate-open-api.sh
open-api-dart:
cd ./open-api && bash ./bin/generate-open-api.sh dart
open-api-typescript:
cd ./open-api && bash ./bin/generate-open-api.sh typescript
api:
npm --prefix server run api:generate
sql:
npm --prefix server run sql:generate


@@ -28,7 +28,6 @@
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_tr_TR.md">Türkçe</a>
<a href="README_zh_CN.md">中文</a>
<a href="README_ru_RU.md">Русский</a>
</p>
## Disclaimer
@@ -112,7 +111,7 @@ If you feel like this is the right cause and the app is something you are seeing
- [Monthly donation](https://github.com/sponsors/alextran1502) via GitHub Sponsors
- [One-time donation](https://github.com/sponsors/alextran1502?frequency=one-time&sponsor=alextran1502) via GitHub Sponsors
- [Liberapay](https://liberapay.com/alex.tran1502/)
- [Librepay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 1FvEp6P6NM8EZEkpGUFAN2LqJ1gxusNxZX
- ZCash: u1smm4wvqegcp46zss2jf5xptchgeczp4rx7a0wu3mermf2wxahm26yyz5w9mw3f2p4emwlljxjumg774kgs8rntt9yags0whnzane4n67z4c7gppq4yyvcj404ne3r769prwzd9j8ntvqp44fa6d67sf7rmcfjmds3gmeceff4u8e92rh38nd30cr96xw6vfhk6scu4ws90ldzupr3sz


@@ -1,124 +0,0 @@
<p align="center">
<br/>
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/license-MIT-green.svg?color=3F51B5&style=for-the-badge&label=License&logoColor=000000&labelColor=ececec" alt="License: MIT"></a>
<a href="https://discord.gg/D8JsnBEuKb">
<img src="https://img.shields.io/discord/979116623879368755.svg?label=Discord&logo=Discord&style=for-the-badge&logoColor=000000&labelColor=ececec" alt="Discord"/>
</a>
<br/>
<br/>
</p>
<p align="center">
<img src="design/immich-logo.svg" width="150" title="Login With Custom URL">
</p>
<h3 align="center">Immich - Высокопроизводительное решение для автономоного создания фото и видео архивов</h3>
<br/>
<a href="https://immich.app">
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="README_ca_ES.md">Català</a>
<a href="README_es_ES.md">Español</a>
<a href="README_fr_FR.md">Français</a>
<a href="README_it_IT.md">Italiano</a>
<a href="README_ja_JP.md">日本語</a>
<a href="README_ko_KR.md">한국어</a>
<a href="README_de_DE.md">Deutsch</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_tr_TR.md">Türkçe</a>
<a href="README_zh_CN.md">中文</a>
<a href="README_ru_RU.md">Русский</a>
</p>
## Disclaimer
- ⚠️ This project is under **very active** development.
- ⚠️ Expect bugs and breaking changes.
- ⚠️ **Do not use this app to back up your photos and videos.**
- ⚠️ Always follow the [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) backup plan for your precious photos and videos!
## Contents
- [Official documentation](https://immich.app/docs)
- [Roadmap](https://github.com/orgs/immich-app/projects/1)
- [Demo](#demo)
- [Features](#features)
- [Introduction](https://immich.app/docs/overview/introduction)
- [Installation](https://immich.app/docs/install/requirements)
- [Contribution guide](https://immich.app/docs/overview/support-the-project)
- [Support the project](#support-the-project)
## Documentation
You can find the main documentation, including installation instructions, at https://immich.app/.
## Demo
You can see the web demo at https://demo.immich.app
For the mobile app, you can use `https://demo.immich.app/api` for the `Server Endpoint URL`
```bash title="Demo credentials"
Login credentials
username/email: demo@immich.app
password: demo
```
```
Spec: Free-tier Oracle VM - Amsterdam - 2.4Ghz quad-core ARM64 CPU, 24GB RAM
```
## Features
| Features | Mobile | Web |
| --------------------------------------------------- | ------ | --- |
| Upload and view videos and photos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Selective album(s) for backup | Yes | N/A |
| Download photos and videos to local device | Yes | Yes |
| Multi-user support | Yes | Yes |
| Albums and shared albums | Yes | Yes |
| Scrubbable/draggable scrollbar | Yes | Yes |
| RAW format support | Yes | Yes |
| Metadata view (EXIF, map) | Yes | Yes |
| Search by metadata, objects, faces, and CLIP | Yes | Yes |
| Administrative functions (user management) | No | Yes |
| Background backup | Yes | N/A |
| Virtual scroll | Yes | Yes |
| OAuth support | Yes | Yes |
| API keys | N/A | Yes |
| LivePhoto/MotionPhoto backup and playback | Yes | Yes |
| User-defined storage structure | Yes | Yes |
| Public albums | No | Yes |
| Archive and Favorites | Yes | Yes |
| World map | Yes | Yes |
| Partner sharing | Yes | Yes |
| Facial recognition and grouping | Yes | Yes |
| On this day (x years ago) | Yes | Yes |
| Offline support | Yes | No |
| Read-only galleries | Yes | Yes |
| Collages | Yes | Yes |
## Support the project
I have dedicated myself to this project, and I will not stop. I will keep updating the documentation, adding new features, and fixing bugs. But I cannot do it alone, so I need your help to give me the extra motivation to keep going.
As our hosts on [selfhosted.show - In the episode 'The-organization-must-not-be-name is a Hostile Actor'](https://selfhosted.show/79?t=1418) said, this is a massive undertaking that the team and I are working on. I would love to be able to do this full-time someday, and I am asking for your help to make that happen.
If you feel like this is the right cause and you have been using the app for a while, please consider supporting the project financially by choosing an option below.
### Donation
- [Monthly donation](https://github.com/sponsors/alextran1502) via GitHub Sponsors
- [One-time donation](https://github.com/sponsors/alextran1502?frequency=one-time&sponsor=alextran1502) via GitHub Sponsors
- [Liberapay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 1FvEp6P6NM8EZEkpGUFAN2LqJ1gxusNxZX
- ZCash: u1smm4wvqegcp46zss2jf5xptchgeczp4rx7a0wu3mermf2wxahm26yyz5w9mw3f2p4emwlljxjumg774kgs8rntt9yags0whnzane4n67z4c7gppq4yyvcj404ne3r769prwzd9j8ntvqp44fa6d67sf7rmcfjmds3gmeceff4u8e92rh38nd30cr96xw6vfhk6scu4ws90ldzupr3sz
## Contributors
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>


@@ -17,7 +17,7 @@
</p>
<br/>
<a href="https://immich.app">
<img src="design/immich-screenshots.png" title="界面截图">
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
@@ -32,7 +32,6 @@
<a href="README_de_DE.md">Deutsch</a>
<a href="README_nl_NL.md">Nederlands</a>
<a href="README_tr_TR.md">Türkçe</a>
<a href="README_ru_RU.md">Русский</a>
</p>
## Disclaimer
@@ -40,7 +39,7 @@
- ⚠️ This project is under **very active** development.
- ⚠️ Expect bugs and major changes at any time.
- ⚠️ **Do not use this software as the only way to store your photos and videos.**
- ⚠️ Always follow the [3-2-1](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) backup plan for your precious photos and videos!
## Table of Contents
@@ -75,41 +74,40 @@
# Features
| Features | Mobile | Web |
| --------------------------------------------------- | ------ | --- |
| Upload and view photos and videos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Selective album(s) for backup | Yes | N/A |
| Download photos and videos to local device | Yes | Yes |
| Multi-user support | Yes | Yes |
| Albums and shared albums | Yes | Yes |
| Scrubbable/draggable scrollbar | Yes | Yes |
| RAW format support | Yes | Yes |
| Metadata view (EXIF, map) | Yes | Yes |
| Search by metadata, objects, faces, and tags | Yes | Yes |
| Administrative functions (user management) | No | Yes |
| Background backup | Yes | N/A |
| Virtual scroll | Yes | Yes |
| OAuth support | Yes | Yes |
| API keys | N/A | Yes |
| LivePhoto backup and playback | Yes | Yes |
| User-defined storage structure | Yes | Yes |
| Public sharing | No | Yes |
| Archive and Favorites | Yes | Yes |
| World map | Yes | Yes |
| Partner sharing | Yes | Yes |
| Facial recognition and grouping | Yes | Yes |
| Memories (on this day) | Yes | Yes |
| Offline support | Yes | No |
| Read-only albums | Yes | Yes |
| Photo stacking | Yes | Yes |
| Features | Mobile | Web |
| --------------------------------------------------- | -------- | --- |
| Upload and view photos and videos | Yes | Yes |
| Auto backup when the app is opened | Yes | N/A |
| Selective album(s) for backup | Yes | N/A |
| Download photos and videos to local device | Yes | Yes |
| Multi-user support | Yes | Yes |
| Albums | Yes | Yes |
| Shared albums | Yes | Yes |
| Scrubbable/draggable scrollbar | Yes | Yes |
| RAW format support (HEIC, HEIF, DNG, Apple ProRaw) | Yes | Yes |
| Metadata view (EXIF, map) | Yes | Yes |
| Search by metadata, objects, and tags | Yes | Yes |
| Administrative functions (user management) | No | Yes |
| Background backup | Yes | N/A |
| Virtual scroll | Yes | Yes |
| OAuth support | | Yes |
| API keys | N/A | Yes |
| LivePhoto backup and playback | iOS only | Yes |
| User-defined storage structure | Yes | Yes |
| Public sharing | No | Yes |
| Archive and Favorites | Yes | Yes |
| Global map | No | Yes |
| Partner sharing | Yes | Yes |
| Facial recognition and grouping | Yes | Yes |
| Memories (on this day) | Yes | Yes |
| Offline support | Yes | No |
| Read-only albums | Yes | Yes |
# Support the project
I have dedicated myself to this project and will continue to update the documentation, add new features, and fix issues. But a single tree does not make a forest; I need you to give me the motivation to keep going.
As I said on [selfhosted.show - In the episode 'The-organization-must-not-be-name is a Hostile Actor'](https://selfhosted.show/79?t=1418), this is a massive undertaking for me and the team. I hope that one day I will be able to develop this project full-time, and I ask for your help to make that dream come true.
If you have used this project for a while and the words above resonate with you, please consider supporting me through any of the options below.
@@ -120,9 +118,3 @@
- [Liberapay](https://liberapay.com/alex.tran1502/)
- [buymeacoffee](https://www.buymeacoffee.com/altran1502)
- Bitcoin: 1FvEp6P6NM8EZEkpGUFAN2LqJ1gxusNxZX
- ZCash: u1smm4wvqegcp46zss2jf5xptchgeczp4rx7a0wu3mermf2wxahm26yyz5w9mw3f2p4emwlljxjumg774kgs8rntt9yags0whnzane4n67z4c7gppq4yyvcj404ne3r769prwzd9j8ntvqp44fa6d67sf7rmcfjmds3gmeceff4u8e92rh38nd30cr96xw6vfhk6scu4ws90ldzupr3sz
## Contributors
<a href="https://github.com/alextran1502/immich/graphs/contributors">
<img src="https://contrib.rocks/image?repo=immich-app/immich" width="100%"/>
</a>


@@ -5,6 +5,7 @@ node_modules
.env
.env.*
!.env.example
src/api/open-api
*.md
*.json
coverage


@@ -1,4 +1,4 @@
FROM ghcr.io/immich-app/base-server-dev:20240111@sha256:5acf773796f93c7a3216ffdbdb3604dc812f2b2317b84a1b57b65674826b746a as test
FROM ghcr.io/immich-app/base-server-dev:20231109 as test
WORKDIR /usr/src/app/server
COPY server/package.json server/package-lock.json ./
@@ -10,7 +10,7 @@ COPY cli/package.json cli/package-lock.json ./
RUN npm ci
COPY ./cli/ .
FROM ghcr.io/immich-app/base-server-prod:20240111@sha256:e917605008977f68dc3b6f7879c264cae4bff6c4186b119a6e114a60f8f5a354
FROM ghcr.io/immich-app/base-server-prod:20231109
VOLUME /usr/src/app/upload

1681 cli/package-lock.json generated

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.0.6",
"version": "2.0.5",
"description": "Command Line Interface (CLI) for Immich",
"main": "dist/index.js",
"bin": {
@@ -12,7 +12,6 @@
"cli"
],
"dependencies": {
"@immich/sdk": "file:../open-api/typescript-sdk",
"axios": "^1.6.2",
"byte-size": "^8.1.1",
"cli-progress": "^3.12.0",
@@ -25,17 +24,21 @@
"devDependencies": {
"@testcontainers/postgresql": "^10.4.0",
"@types/byte-size": "^8.1.0",
"@types/chai": "^4.3.5",
"@types/cli-progress": "^3.11.0",
"@types/jest": "^29.5.2",
"@types/js-yaml": "^4.0.5",
"@types/mime-types": "^2.1.1",
"@types/mock-fs": "^4.13.1",
"@types/node": "^20.3.1",
"@typescript-eslint/eslint-plugin": "^6.0.0",
"@typescript-eslint/parser": "^6.0.0",
"chai": "^4.3.7",
"eslint": "^8.43.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-jest": "^27.2.2",
"eslint-plugin-prettier": "^5.0.0",
"eslint-plugin-unicorn": "^50.0.0",
"eslint-plugin-unicorn": "^49.0.0",
"immich": "file:../server",
"jest": "^29.5.0",
"jest-extended": "^4.0.0",
@@ -58,7 +61,7 @@
"format": "prettier --check .",
"format:fix": "prettier --write .",
"check": "tsc --noEmit",
"test:e2e": "jest --config test/e2e/jest-e2e.json --runInBand"
"test:e2e": "NODE_OPTIONS='--experimental-vm-modules' jest --config test/e2e/jest-e2e.json --runInBand"
},
"jest": {
"clearMocks": true,


@@ -9,7 +9,7 @@ import {
ServerInfoApi,
SystemConfigApi,
UserApi,
} from '@immich/sdk';
} from './open-api';
import { ApiConfiguration } from '../cores/api-configuration';
import FormData from 'form-data';


@@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.92.1
* The version of the OpenAPI document: 1.91.4
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -355,18 +355,6 @@ export interface AllJobStatusResponseDto {
* @memberof AllJobStatusResponseDto
*/
'backgroundTask': JobStatusDto;
/**
*
* @type {JobStatusDto}
* @memberof AllJobStatusResponseDto
*/
'faceDetection': JobStatusDto;
/**
*
* @type {JobStatusDto}
* @memberof AllJobStatusResponseDto
*/
'facialRecognition': JobStatusDto;
/**
*
* @type {JobStatusDto}
@@ -385,6 +373,12 @@ export interface AllJobStatusResponseDto {
* @memberof AllJobStatusResponseDto
*/
'migration': JobStatusDto;
/**
*
* @type {JobStatusDto}
* @memberof AllJobStatusResponseDto
*/
'recognizeFaces': JobStatusDto;
/**
*
* @type {JobStatusDto}
@@ -1478,12 +1472,6 @@ export interface CreateUserDto {
* @memberof CreateUserDto
*/
'password': string;
/**
*
* @type {number}
* @memberof CreateUserDto
*/
'quotaSizeInBytes'?: number | null;
/**
*
* @type {string}
@@ -1988,8 +1976,7 @@ export const JobName = {
ThumbnailGeneration: 'thumbnailGeneration',
MetadataExtraction: 'metadataExtraction',
VideoConversion: 'videoConversion',
FaceDetection: 'faceDetection',
FacialRecognition: 'facialRecognition',
RecognizeFaces: 'recognizeFaces',
SmartSearch: 'smartSearch',
BackgroundTask: 'backgroundTask',
StorageTemplateMigration: 'storageTemplateMigration',
@@ -2492,18 +2479,6 @@ export interface PartnerResponseDto {
* @memberof PartnerResponseDto
*/
'profileImagePath': string;
/**
*
* @type {number}
* @memberof PartnerResponseDto
*/
'quotaSizeInBytes': number | null;
/**
*
* @type {number}
* @memberof PartnerResponseDto
*/
'quotaUsageInBytes': number | null;
/**
*
* @type {boolean}
@@ -3032,24 +3007,12 @@ export interface SearchResponseDto {
* @interface ServerConfigDto
*/
export interface ServerConfigDto {
/**
*
* @type {string}
* @memberof ServerConfigDto
*/
'externalDomain': string;
/**
*
* @type {boolean}
* @memberof ServerConfigDto
*/
'isInitialized': boolean;
/**
*
* @type {boolean}
* @memberof ServerConfigDto
*/
'isOnboarded': boolean;
/**
*
* @type {string}
@@ -3099,6 +3062,12 @@ export interface ServerFeaturesDto {
* @memberof ServerFeaturesDto
*/
'map': boolean;
/**
*
* @type {boolean}
* @memberof ServerFeaturesDto
*/
'metrics': boolean;
/**
*
* @type {boolean}
@@ -3603,6 +3572,12 @@ export interface SystemConfigDto {
* @memberof SystemConfigDto
*/
'map': SystemConfigMapDto;
/**
*
* @type {SystemConfigMetricsDto}
* @memberof SystemConfigDto
*/
'metrics': SystemConfigMetricsDto;
/**
*
* @type {SystemConfigNewVersionCheckDto}
@@ -3627,12 +3602,6 @@ export interface SystemConfigDto {
* @memberof SystemConfigDto
*/
'reverseGeocoding': SystemConfigReverseGeocodingDto;
/**
*
* @type {SystemConfigServerDto}
* @memberof SystemConfigDto
*/
'server': SystemConfigServerDto;
/**
*
* @type {SystemConfigStorageTemplateDto}
@@ -3781,12 +3750,6 @@ export interface SystemConfigJobDto {
* @memberof SystemConfigJobDto
*/
'backgroundTask': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
* @memberof SystemConfigJobDto
*/
'faceDetection': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
@@ -3805,6 +3768,12 @@ export interface SystemConfigJobDto {
* @memberof SystemConfigJobDto
*/
'migration': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
* @memberof SystemConfigJobDto
*/
'recognizeFaces': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
@@ -3823,6 +3792,12 @@ export interface SystemConfigJobDto {
* @memberof SystemConfigJobDto
*/
'smartSearch': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
* @memberof SystemConfigJobDto
*/
'storageTemplateMigration': JobSettingsDto;
/**
*
* @type {JobSettingsDto}
@@ -3945,6 +3920,19 @@ export interface SystemConfigMapDto {
*/
'lightStyle': string;
}
/**
*
* @export
* @interface SystemConfigMetricsDto
*/
export interface SystemConfigMetricsDto {
/**
*
* @type {boolean}
* @memberof SystemConfigMetricsDto
*/
'enabled': boolean;
}
/**
*
* @export
@@ -4057,43 +4045,12 @@ export interface SystemConfigReverseGeocodingDto {
*/
'enabled': boolean;
}
/**
*
* @export
* @interface SystemConfigServerDto
*/
export interface SystemConfigServerDto {
/**
*
* @type {string}
* @memberof SystemConfigServerDto
*/
'externalDomain': string;
/**
*
* @type {string}
* @memberof SystemConfigServerDto
*/
'loginPageMessage': string;
}
/**
*
* @export
* @interface SystemConfigStorageTemplateDto
*/
export interface SystemConfigStorageTemplateDto {
/**
*
* @type {boolean}
* @memberof SystemConfigStorageTemplateDto
*/
'enabled': boolean;
/**
*
* @type {boolean}
* @memberof SystemConfigStorageTemplateDto
*/
'hashVerificationEnabled': boolean;
/**
*
* @type {string}
@@ -4569,12 +4526,6 @@ export interface UpdateUserDto {
* @memberof UpdateUserDto
*/
'password'?: string;
/**
*
* @type {number}
* @memberof UpdateUserDto
*/
'quotaSizeInBytes'?: number | null;
/**
*
* @type {boolean}
@@ -4602,12 +4553,6 @@ export interface UsageByUserDto {
* @memberof UsageByUserDto
*/
'photos': number;
/**
*
* @type {number}
* @memberof UsageByUserDto
*/
'quotaSizeInBytes': number | null;
/**
*
* @type {number}
@@ -4766,18 +4711,6 @@ export interface UserResponseDto {
* @memberof UserResponseDto
*/
'profileImagePath': string;
/**
*
* @type {number}
* @memberof UserResponseDto
*/
'quotaSizeInBytes': number | null;
/**
*
* @type {number}
* @memberof UserResponseDto
*/
'quotaUsageInBytes': number | null;
/**
*
* @type {boolean}
@@ -12784,6 +12717,109 @@ export class LibraryApi extends BaseAPI {
}
/**
* MetricsApi - axios parameter creator
* @export
*/
export const MetricsApiAxiosParamCreator = function (configuration?: Configuration) {
return {
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
getMetrics: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
const localVarPath = `/metrics`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
// authentication cookie required
// authentication api_key required
await setApiKeyToObject(localVarHeaderParameter, "x-api-key", configuration)
// authentication bearer required
// http bearer authentication required
await setBearerAuthToObject(localVarHeaderParameter, configuration)
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
}
};
/**
* MetricsApi - functional programming interface
* @export
*/
export const MetricsApiFp = function(configuration?: Configuration) {
const localVarAxiosParamCreator = MetricsApiAxiosParamCreator(configuration)
return {
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async getMetrics(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<object>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.getMetrics(options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
}
};
/**
* MetricsApi - factory interface
* @export
*/
export const MetricsApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
const localVarFp = MetricsApiFp(configuration)
return {
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
getMetrics(options?: AxiosRequestConfig): AxiosPromise<object> {
return localVarFp.getMetrics(options).then((request) => request(axios, basePath));
},
};
};
/**
* MetricsApi - object-oriented interface
* @export
* @class MetricsApi
* @extends {BaseAPI}
*/
export class MetricsApi extends BaseAPI {
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof MetricsApi
*/
public getMetrics(options?: AxiosRequestConfig) {
return MetricsApiFp(this.configuration).getMetrics(options).then((request) => request(this.axios, this.basePath));
}
}
/**
* OAuthApi - axios parameter creator
* @export
@@ -14645,11 +14681,10 @@ export const SearchApiAxiosParamCreator = function (configuration?: Configuratio
* @param {'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER'} [type]
* @param {boolean} [recent]
* @param {boolean} [motion]
* @param {boolean} [withArchived]
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
search: async (q?: string, query?: string, clip?: boolean, type?: 'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER', recent?: boolean, motion?: boolean, withArchived?: boolean, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
search: async (q?: string, query?: string, clip?: boolean, type?: 'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER', recent?: boolean, motion?: boolean, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
const localVarPath = `/search`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
@@ -14695,10 +14730,6 @@ export const SearchApiAxiosParamCreator = function (configuration?: Configuratio
localVarQueryParameter['motion'] = motion;
}
if (withArchived !== undefined) {
localVarQueryParameter['withArchived'] = withArchived;
}
setSearchParams(localVarUrlObj, localVarQueryParameter);
@@ -14787,12 +14818,11 @@ export const SearchApiFp = function(configuration?: Configuration) {
* @param {'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER'} [type]
* @param {boolean} [recent]
* @param {boolean} [motion]
* @param {boolean} [withArchived]
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async search(q?: string, query?: string, clip?: boolean, type?: 'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER', recent?: boolean, motion?: boolean, withArchived?: boolean, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<SearchResponseDto>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.search(q, query, clip, type, recent, motion, withArchived, options);
async search(q?: string, query?: string, clip?: boolean, type?: 'IMAGE' | 'VIDEO' | 'AUDIO' | 'OTHER', recent?: boolean, motion?: boolean, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<SearchResponseDto>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.search(q, query, clip, type, recent, motion, options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
@@ -14831,7 +14861,7 @@ export const SearchApiFactory = function (configuration?: Configuration, basePat
* @throws {RequiredError}
*/
search(requestParameters: SearchApiSearchRequest = {}, options?: AxiosRequestConfig): AxiosPromise<SearchResponseDto> {
return localVarFp.search(requestParameters.q, requestParameters.query, requestParameters.clip, requestParameters.type, requestParameters.recent, requestParameters.motion, requestParameters.withArchived, options).then((request) => request(axios, basePath));
return localVarFp.search(requestParameters.q, requestParameters.query, requestParameters.clip, requestParameters.type, requestParameters.recent, requestParameters.motion, options).then((request) => request(axios, basePath));
},
/**
*
@@ -14892,13 +14922,6 @@ export interface SearchApiSearchRequest {
* @memberof SearchApiSearch
*/
readonly motion?: boolean
/**
*
* @type {boolean}
* @memberof SearchApiSearch
*/
readonly withArchived?: boolean
}
/**
@@ -14947,7 +14970,7 @@ export class SearchApi extends BaseAPI {
* @memberof SearchApi
*/
public search(requestParameters: SearchApiSearchRequest = {}, options?: AxiosRequestConfig) {
return SearchApiFp(this.configuration).search(requestParameters.q, requestParameters.query, requestParameters.clip, requestParameters.type, requestParameters.recent, requestParameters.motion, requestParameters.withArchived, options).then((request) => request(this.axios, this.basePath));
return SearchApiFp(this.configuration).search(requestParameters.q, requestParameters.query, requestParameters.clip, requestParameters.type, requestParameters.recent, requestParameters.motion, options).then((request) => request(this.axios, this.basePath));
}
/**
@@ -15210,44 +15233,6 @@ export const ServerInfoApiAxiosParamCreator = function (configuration?: Configur
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
return {
url: toPathString(localVarUrlObj),
options: localVarRequestOptions,
};
},
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
setAdminOnboarding: async (options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
const localVarPath = `/server-info/admin-onboarding`;
// use dummy base URL string because the URL constructor only accepts absolute URLs.
const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
let baseOptions;
if (configuration) {
baseOptions = configuration.baseOptions;
}
const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
const localVarHeaderParameter = {} as any;
const localVarQueryParameter = {} as any;
// authentication cookie required
// authentication api_key required
await setApiKeyToObject(localVarHeaderParameter, "x-api-key", configuration)
// authentication bearer required
// http bearer authentication required
await setBearerAuthToObject(localVarHeaderParameter, configuration)
setSearchParams(localVarUrlObj, localVarQueryParameter);
let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
@@ -15339,15 +15324,6 @@ export const ServerInfoApiFp = function(configuration?: Configuration) {
const localVarAxiosArgs = await localVarAxiosParamCreator.pingServer(options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
async setAdminOnboarding(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
const localVarAxiosArgs = await localVarAxiosParamCreator.setAdminOnboarding(options);
return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
},
}
};
@@ -15422,14 +15398,6 @@ export const ServerInfoApiFactory = function (configuration?: Configuration, bas
pingServer(options?: AxiosRequestConfig): AxiosPromise<ServerPingResponse> {
return localVarFp.pingServer(options).then((request) => request(axios, basePath));
},
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
*/
setAdminOnboarding(options?: AxiosRequestConfig): AxiosPromise<void> {
return localVarFp.setAdminOnboarding(options).then((request) => request(axios, basePath));
},
};
};
@@ -15519,16 +15487,6 @@ export class ServerInfoApi extends BaseAPI {
public pingServer(options?: AxiosRequestConfig) {
return ServerInfoApiFp(this.configuration).pingServer(options).then((request) => request(this.axios, this.basePath));
}
/**
*
* @param {*} [options] Override http request option.
* @throws {RequiredError}
* @memberof ServerInfoApi
*/
public setAdminOnboarding(options?: AxiosRequestConfig) {
return ServerInfoApiFp(this.configuration).setAdminOnboarding(options).then((request) => request(this.axios, this.basePath));
}
}


@@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.92.1
* The version of the OpenAPI document: 1.91.4
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).


@@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.92.1
* The version of the OpenAPI document: 1.91.4
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).


@@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.92.1
* The version of the OpenAPI document: 1.91.4
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).


@@ -4,7 +4,7 @@
* Immich
* Immich API
*
* The version of the OpenAPI document: 1.92.1
* The version of the OpenAPI document: 1.91.4
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).


@@ -2,7 +2,7 @@ import { ImmichApi } from '../api/client';
import { SessionService } from '../services/session.service';
import { LoginError } from '../cores/errors/login-error';
import { exit } from 'node:process';
import { ServerVersionResponseDto, UserResponseDto } from '@immich/sdk';
import { ServerVersionResponseDto, UserResponseDto } from 'src/api/open-api';
import { BaseOptionsDto } from 'src/cores/dto/base-options-dto';
export abstract class BaseCommand {


@@ -1,6 +1,6 @@
import { BaseCommand } from '../../cli/base-command';
export class LoginKey extends BaseCommand {
export default class LoginKey extends BaseCommand {
public async run(instanceUrl: string, apiKey: string): Promise<void> {
console.log('Executing API key auth flow...');


@@ -1,6 +1,6 @@
import { BaseCommand } from '../cli/base-command';
export class Logout extends BaseCommand {
export default class Logout extends BaseCommand {
public static readonly description = 'Logout and remove persisted credentials';
public async run(): Promise<void> {


@@ -1,6 +1,6 @@
import { BaseCommand } from '../cli/base-command';
export class ServerInfo extends BaseCommand {
export default class ServerInfo extends BaseCommand {
public async run() {
await this.connect();
const { data: versionInfo } = await this.immichApi.serverInfoApi.getServerVersion();


@@ -6,10 +6,10 @@ import fs from 'node:fs';
import cliProgress from 'cli-progress';
import byteSize from 'byte-size';
import { BaseCommand } from '../cli/base-command';
import axios, { AxiosRequestConfig, AxiosResponse } from 'axios';
import axios, { AxiosRequestConfig } from 'axios';
import FormData from 'form-data';
export class Upload extends BaseCommand {
export default class Upload extends BaseCommand {
uploadLength!: number;
public async run(paths: string[], options: UploadOptionsDto): Promise<void> {
@@ -60,7 +60,7 @@ export class Upload extends BaseCommand {
for (const asset of assetsToUpload) {
// Compute total size first
await asset.prepare();
await asset.process();
totalSize += asset.fileSize;
if (options.albumName) {
@@ -107,8 +107,6 @@ export class Upload extends BaseCommand {
const formData = asset.getUploadFormData();
const res = await this.uploadAsset(formData);
existingAssetId = res.data.id;
uploadCounter++;
totalSizeUploaded += asset.fileSize;
}
if ((options.album || options.albumName) && asset.albumName !== undefined) {
@@ -129,6 +127,9 @@ export class Upload extends BaseCommand {
}
}
}
totalSizeUploaded += asset.fileSize;
uploadCounter++;
}
sizeSoFar += asset.fileSize;
@@ -171,7 +172,7 @@ export class Upload extends BaseCommand {
}
}
private async uploadAsset(data: FormData): Promise<AxiosResponse> {
private async uploadAsset(data: FormData): Promise<axios.AxiosResponse> {
const url = this.immichApi.apiConfiguration.instanceUrl + '/asset/upload';
const config: AxiosRequestConfig = {


@@ -1,17 +1,18 @@
import crypto from 'crypto';
import FormData from 'form-data';
import * as fs from 'graceful-fs';
import { createReadStream } from 'node:fs';
import { basename } from 'node:path';
import crypto from 'crypto';
import Os from 'os';
import FormData from 'form-data';
export class Asset {
readonly path: string;
readonly deviceId!: string;
assetData?: fs.ReadStream;
deviceAssetId?: string;
fileCreatedAt?: string;
fileModifiedAt?: string;
sidecarData?: fs.ReadStream;
sidecarPath?: string;
fileSize!: number;
albumName?: string;
@@ -20,30 +21,32 @@ export class Asset {
this.path = path;
}
async prepare() {
async process() {
const stats = await fs.promises.stat(this.path);
this.deviceAssetId = `${basename(this.path)}-${stats.size}`.replace(/\s+/g, '');
this.fileCreatedAt = stats.mtime.toISOString();
this.fileModifiedAt = stats.mtime.toISOString();
this.fileSize = stats.size;
this.albumName = this.extractAlbumName();
this.assetData = this.getReadStream(this.path);
// TODO: doesn't xmp replace the file extension? Will need investigation
const sideCarPath = `${this.path}.xmp`;
try {
fs.accessSync(sideCarPath, fs.constants.R_OK);
this.sidecarData = this.getReadStream(sideCarPath);
} catch (error) {}
}
getUploadFormData(): FormData {
if (!this.assetData) throw new Error('Asset data not set');
if (!this.deviceAssetId) throw new Error('Device asset id not set');
if (!this.fileCreatedAt) throw new Error('File created at not set');
if (!this.fileModifiedAt) throw new Error('File modified at not set');
// TODO: doesn't xmp replace the file extension? Will need investigation
const sideCarPath = `${this.path}.xmp`;
let sidecarData: fs.ReadStream | undefined = undefined;
try {
fs.accessSync(sideCarPath, fs.constants.R_OK);
sidecarData = createReadStream(sideCarPath);
} catch (error) {}
const data: any = {
assetData: createReadStream(this.path),
assetData: this.assetData as any,
deviceAssetId: this.deviceAssetId,
deviceId: 'CLI',
fileCreatedAt: this.fileCreatedAt,
@@ -56,13 +59,17 @@ export class Asset {
formData.append(prop, data[prop]);
}
if (sidecarData) {
formData.append('sidecarData', sidecarData);
if (this.sidecarData) {
formData.append('sidecarData', this.sidecarData);
}
return formData;
}
private getReadStream(path: string): fs.ReadStream {
return fs.createReadStream(path);
}
async delete(): Promise<void> {
return fs.promises.unlink(this.path);
}


@@ -1,10 +1,10 @@
#! /usr/bin/env node
import { Option, Command } from 'commander';
import { Upload } from './commands/upload';
import { ServerInfo } from './commands/server-info';
import { LoginKey } from './commands/login/key';
import { Logout } from './commands/logout';
import Upload from './commands/upload';
import ServerInfo from './commands/server-info';
import LoginKey from './commands/login/key';
import Logout from './commands/logout';
import { version } from '../package.json';
import path from 'node:path';


@@ -198,58 +198,6 @@ const tests: Test[] = [
[`/photos/3.jpg`]: false,
},
},
{
test: 'should support ignoring full filename',
options: {
pathsToCrawl: ['/photos'],
exclusionPatterns: ['**/image2.jpg'],
},
files: {
'/photos/image1.jpg': true,
'/photos/image2.jpg': false,
'/photos/image3.jpg': true,
},
},
{
test: 'should support ignoring file extensions',
options: {
pathsToCrawl: ['/photos'],
exclusionPatterns: ['**/*.png'],
},
files: {
'/photos/image1.jpg': true,
'/photos/image2.png': false,
'/photos/image3.jpg': true,
},
},
{
test: 'should support ignoring folder names',
options: {
pathsToCrawl: ['/photos'],
recursive: true,
exclusionPatterns: ['**/raw/**'],
},
files: {
'/photos/image1.jpg': true,
'/photos/image/image1.jpg': true,
'/photos/raw/image2.dng': false,
'/photos/raw/image3.dng': false,
'/photos/notraw/image3.jpg': true,
},
},
{
test: 'should support ignoring absolute paths',
options: {
pathsToCrawl: ['/'],
recursive: true,
exclusionPatterns: ['/images/**'],
},
files: {
'/photos/image1.jpg': true,
'/images/image2.jpg': false,
'/assets/image3.jpg': true,
},
},
];
describe(CrawlService.name, () => {


@@ -16,9 +16,10 @@ import {
const mockPingServer = jest.fn(() => Promise.resolve({ data: { res: 'pong' } }));
const mockUserInfo = jest.fn(() => Promise.resolve({ data: { email: 'admin@example.com' } }));
jest.mock('@immich/sdk', () => {
jest.mock('../api/open-api', () => {
return {
...jest.requireActual('@immich/sdk'),
__esModule: true,
...jest.requireActual('../api/open-api'),
UserApi: jest.fn().mockImplementation(() => {
return { getMyUserInfo: mockUserInfo };
}),


@@ -7,7 +7,7 @@
"testRegex": ".e2e-spec.ts$",
"testTimeout": 6000000,
"transform": {
"^.+\\.ts$": "ts-jest"
"^.+\\.(t|j)s$": "ts-jest"
},
"collectCoverageFrom": [
"<rootDir>/src/**/*.(t|j)s",


@@ -1,8 +1,8 @@
import { api } from '@test/api';
import { restoreTempFolder, testApp } from 'immich/test/test-utils';
import { LoginResponseDto } from 'src/api/open-api';
import { APIKeyCreateResponseDto } from '@app/domain';
import { api } from '@test/../e2e/api/client';
import { restoreTempFolder, testApp } from '@test/../e2e/jobs/utils';
import { LoginResponseDto } from '@immich/sdk';
import { LoginKey } from 'src/commands/login/key';
import LoginKey from 'src/commands/login/key';
import { LoginError } from 'src/cores/errors/login-error';
import { CLI_BASE_OPTIONS, spyOnConsole } from 'test/cli-test-utils';


@@ -1,8 +1,8 @@
import { api } from '@test/api';
import { restoreTempFolder, testApp } from 'immich/test/test-utils';
import { LoginResponseDto } from 'src/api/open-api';
import ServerInfo from 'src/commands/server-info';
import { APIKeyCreateResponseDto } from '@app/domain';
import { api } from '@test/../e2e/api/client';
import { restoreTempFolder, testApp } from '@test/../e2e/jobs/utils';
import { LoginResponseDto } from '@immich/sdk';
import { ServerInfo } from 'src/commands/server-info';
import { CLI_BASE_OPTIONS, spyOnConsole } from 'test/cli-test-utils';
describe(`server-info (e2e)`, () => {


@@ -37,6 +37,7 @@ export default async () => {
}
process.env.NODE_ENV = 'development';
process.env.IMMICH_CONFIG_FILE = path.normalize(`${__dirname}/../../../server/e2e/jobs/immich-e2e-config.json`);
process.env.IMMICH_TEST_ENV = 'true';
process.env.IMMICH_CONFIG_FILE = path.normalize(`${__dirname}/../../../server/test/e2e/immich-e2e-config.json`);
process.env.TZ = 'Z';
};


@@ -1,8 +1,8 @@
import { api } from '@test/api';
import { IMMICH_TEST_ASSET_PATH, restoreTempFolder, testApp } from 'immich/test/test-utils';
import { LoginResponseDto } from 'src/api/open-api';
import Upload from 'src/commands/upload';
import { APIKeyCreateResponseDto } from '@app/domain';
import { api } from '@test/../e2e/api/client';
import { IMMICH_TEST_ASSET_PATH, restoreTempFolder, testApp } from '@test/../e2e/jobs/utils';
import { LoginResponseDto } from '@immich/sdk';
import { Upload } from 'src/commands/upload';
import { CLI_BASE_OPTIONS, spyOnConsole } from 'test/cli-test-utils';
describe(`upload (e2e)`, () => {


@@ -1,6 +1,6 @@
{
"compilerOptions": {
"module": "commonjs",
"module": "Node16",
"strict": true,
"declaration": true,
"removeComments": true,
@@ -9,6 +9,7 @@
"allowSyntheticDefaultImports": true,
"resolveJsonModule": true,
"target": "es2021",
"moduleResolution": "node16",
"sourceMap": true,
"outDir": "./dist",
"incremental": true,


@@ -15,9 +15,7 @@ x-server-build: &server-common
restart: always
volumes:
- ../server:/usr/src/app
- ../open-api:/usr/src/open-api
- ${UPLOAD_LOCATION}/photos:/usr/src/app/upload
- ${UPLOAD_LOCATION}/photos/upload:/usr/src/app/upload/upload
- /usr/src/app/node_modules
- /etc/localtime:/etc/localtime:ro
env_file:
@@ -66,7 +64,6 @@ services:
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../open-api/:/usr/src/open-api/
- /usr/src/app/node_modules
ulimits:
nofile:
@@ -95,7 +92,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:c5a607fb6e1bb15d32bbcf14db22787d19e428d59e31a5da67511b49bb0f1ccc
image: redis:6.2-alpine@sha256:b6124ab2e45cc332e16398022a411d7e37181f21ff7874835e0180f56a09e82a
database:
container_name: immich_postgres


@@ -51,7 +51,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:c5a607fb6e1bb15d32bbcf14db22787d19e428d59e31a5da67511b49bb0f1ccc
image: redis:6.2-alpine@sha256:b6124ab2e45cc332e16398022a411d7e37181f21ff7874835e0180f56a09e82a
restart: always
database:


@@ -55,7 +55,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:c5a607fb6e1bb15d32bbcf14db22787d19e428d59e31a5da67511b49bb0f1ccc
image: redis:6.2-alpine@sha256:b6124ab2e45cc332e16398022a411d7e37181f21ff7874835e0180f56a09e82a
restart: always
database:


@@ -1,11 +0,0 @@
version: "3.8"
# ML acceleration on supported Mali ARM GPUs using ARM-NN
services:
mlaccel:
devices:
- /dev/mali0:/dev/mali0
volumes:
- /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
- /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)

122 docs/docs/FAQ.md Normal file

@@ -0,0 +1,122 @@
---
sidebar_position: 7
---
# FAQ
### What is the difference between the cloud icons on the mobile app?
| Icon | Description |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| ![cloud](/img/cloud.svg) | Asset is only available in the cloud and was uploaded from some other device (like the web client) or was deleted from this device after upload |
| ![cloud-cross](/img/cloud-off.svg) | Asset is only available locally and has not yet been backed up |
| ![cloud-done](/img/cloud-done.svg) | Asset was uploaded from this device and is now backed up in the cloud/server, and the original is still available on the device |
### Can I add my existing photo library?
Yes, with an [external library](/docs/features/libraries.md).
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy or Cloudflare Tunnel in front of Immich. Make sure your reverse proxy allows large POST requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Cloudflare Tunnel is limited to 100 MB file sizes. Also check the disk space of your reverse proxy; in some cases, proxies cache requests to disk before passing them on, and if disk space runs out the request fails.
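As a sketch, a minimal `nginx` site configuration with a raised upload limit might look like the following. The hostname and upstream address are placeholders, not part of the official docs; adjust them to your setup.
```nginx
server {
    listen 80;
    server_name immich.example.com;   # placeholder hostname

    # Raise the request body limit so large photo/video uploads succeed
    client_max_body_size 50000M;

    location / {
        # Point at wherever your Immich instance is exposed
        proxy_pass http://127.0.0.1:2283;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
```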
### Why is Immich slow on low-memory systems like the Raspberry Pi?
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#how-can-i-lower-immichs-cpu-usage) this or [disable](/docs/FAQ.md#how-can-i-disable-machine-learning) machine learning entirely.
### How can I lower Immich's CPU usage?
The initial backup is the most intensive due to the number of jobs running. The most CPU-intensive ones are transcoding and machine learning jobs (Tag Images, Smart Search, Recognize Faces), and to a lesser extent thumbnail generation. Here are some ways to lower their CPU usage:
- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
- You _must_ re-run the Recognize Faces job for all images after this for facial recognition on new images to work properly.
- If these changes are not enough, see [below](/docs/FAQ.md#how-can-i-disable-machine-learning) for how you can disable machine learning.
### How can I disable machine learning?
:::info
Disabling machine learning will result in a poor experience for searching and the 'Explore' page, as these are reliant on it to work as intended.
:::
Machine learning can be disabled under Settings > Machine Learning Settings, either entirely or by model type. For instance, you can choose to disable smart search with CLIP, but keep facial recognition enabled. This means that the machine learning service will only process the enabled jobs.
However, disabling all jobs will not disable the machine learning service itself. To prevent it from starting up at all in this case, you can comment out the `immich-machine-learning` section of the docker-compose.yml.
### I'm getting errors about models being corrupt or failing to download. What do I do?
You can delete the model cache volume, which is where models are downloaded. This will give the service a clean environment to download the model again.
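A sketch of that cleanup with the Docker CLI, assuming the `model-cache` volume name from the example compose file (the exact name is prefixed with your compose project name, so check with `docker volume ls` first):
```bash
docker compose stop immich-machine-learning
docker volume ls | grep model-cache        # find the exact volume name
docker volume rm <project>_model-cache     # <project> is your compose project name
docker compose start immich-machine-learning   # models re-download on demand
```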
### What happens to existing files after I choose a new [Storage Template](/docs/administration/storage-template.mdx)?
Template changes will only apply to new assets. To retroactively apply the template to previously uploaded assets, run the Storage Migration Job, available on the [Jobs](/docs/administration/jobs.md) page.
### In the uploads folder, why are photos stored with the wrong date?
This is fixed by running the storage migration job.
### Why are there so many thumbnail generation jobs?
Immich generates three thumbnails for each asset (blurred, small, and large), as well as a thumbnail for each recognized face.
### How can I see Immich logs?
Most Immich components are typically deployed using Docker. To see logs for deployed Docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
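For instance, assuming the default container names from the example compose file:
```bash
# Follow the server log in real time
docker logs -f immich_server

# Show the last 100 lines from the microservices container
docker logs --tail 100 immich_microservices
```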
### How can I run Immich as a non-root user?
1. Set the `PUID`/`PGID` environment variables (in `.env`).
2. Set the corresponding `user` argument in `docker-compose` for each service.
3. Add an additional volume to `immich-microservices` that mounts internally to `/usr/src/app/.reverse-geocoding-dump`.
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
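A minimal sketch of the three steps above in `docker-compose.yml`, assuming the service names from the example compose file and `PUID`/`PGID` defined in `.env` (e.g. `PUID=1000`, `PGID=1000`):
```yaml
# Excerpt only; merge into the existing docker-compose.yml
services:
  immich-server:
    user: "${PUID}:${PGID}"   # step 2: run as the non-root user from .env
  immich-microservices:
    user: "${PUID}:${PGID}"
    volumes:
      # Step 3: writable location for the reverse-geocoding dump
      - geocoding-dump:/usr/src/app/.reverse-geocoding-dump

volumes:
  geocoding-dump:
```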
### How can I reset the admin password?
The admin password can be reset by running the [reset-admin-password](/docs/administration/server-commands.md) command on the immich-server.
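With the default container name, that typically looks like the following sketch; see the linked server-commands page for the authoritative usage:
```bash
docker exec -it immich_server immich-admin reset-admin-password
```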
### How can I backup data from Immich?
See [backup and restore](/docs/administration/backup-and-restore.md).
### How can I **purge** data from Immich?
Data for Immich comes in two forms:
1. **Metadata** stored in a postgres database, persisted via the `pg_data` volume
2. **Files** (originals, thumbs, profile, etc.), stored in the `UPLOAD_LOCATION` folder.
To remove the **Metadata**, you can stop Immich and delete the volume.
```bash title="Remove Immich (containers and volumes)"
docker-compose down -v
```
After removing the containers and volumes, the **Files** can be cleaned up (if necessary) from the `UPLOAD_LOCATION` by simply deleting any unwanted files or folders.
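For example, a sketch assuming `UPLOAD_LOCATION` is set in your `.env`:
```bash
# Irreversible: removes everything Immich stored under the upload location
rm -rf "${UPLOAD_LOCATION}"/*
```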
### How can I move all data (photos, persons, albums) from one user to another?
This requires some database queries. You can do this on the command line (in the PostgreSQL container, using the `psql` command), or you can add, for example, an [Adminer](https://www.adminer.org/) container to the `docker-compose.yml` file so that you can use a web interface.
:::warning
This is an advanced operation. If you can't do it with the steps described here, this is not for you.
:::
1. **MAKE A BACKUP** - See [backup and restore](/docs/administration/backup-and-restore.md).
2. Find the id of both the 'source' and the 'destination' user (it's the id column in the users table)
3. Three tables need to be updated:
```sql
-- reassign albums
update albums set "ownerId" = '<destinationId>' where "ownerId" = '<sourceId>';
-- reassign people
update person set "ownerId" = '<destinationId>' where "ownerId" = '<sourceId>';
-- reassign assets
update assets set "ownerId" = '<destinationId>' where "ownerId" = '<sourceId>'
and checksum not in (select checksum from assets where "ownerId" = '<destinationId>');
```
4. There might be left-over assets in the 'source' user's library if they are skipped by the last query because of duplicate checksums. These are probably duplicates anyway, and can probably be removed.
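To run these queries from the command line, one option is a `psql` shell inside the database container. This is a sketch assuming the default container name and the `DB_USERNAME`/`DB_DATABASE_NAME` values from the example `.env`:
```bash
docker exec -it immich_postgres psql -U postgres -d immich
```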


@@ -1,311 +0,0 @@
# FAQ
## User
### How can I reset the admin password?
The admin password can be reset by running the [reset-admin-password](/docs/administration/server-commands.md) command on the immich-server.
### How can I see list of all users in Immich?
You can see the list of all users by running the [list-users](/docs/administration/server-commands.md) command on the immich-server.
---
## Mobile App
### What is the difference between the cloud icons on the mobile app?
| Icon | Description |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| ![cloud](/img/cloud.svg) | Asset is only available in the cloud and was uploaded from some other device (like the web client) or was deleted from this device after upload |
| ![cloud-cross](/img/cloud-off.svg) | Asset is only available locally and has not yet been backed up |
| ![cloud-done](/img/cloud-done.svg) | Asset was uploaded from this device and is now backed up to the server; the original file is still on the device |
### I cannot log into the application after an update. What can I do?
First, verify that the mobile app and server are both running the same version (major and minor).
:::note
App store updates sometimes take longer because the stores (Google Play Store and Apple App Store)
need to approve the update first, which may take some time.
:::
If you still cannot log in to the app, try the following:
- Check the mobile logs
- Make sure login credentials are correct by logging in on the web app
---
## Assets
### Can I add my existing photo library?
Yes, with an [External Library](/docs/features/libraries.md).
### What happens to existing files after I choose a new [Storage Template](/docs/administration/storage-template.mdx)?
Template changes will only apply to _new_ assets. To retroactively apply the template to previously uploaded assets, run the Storage Migration Job, available on the [Jobs](/docs/administration/jobs.md) page.
### Why are only photos and not videos being uploaded to Immich?
This often happens when using a reverse proxy (such as nginx or a Cloudflare tunnel) in front of Immich. Make sure your reverse proxy allows large `POST` requests. In `nginx`, set `client_max_body_size 50000M;` or similar. Also check the disk space of your reverse proxy; in some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
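As a rough sketch, the relevant `nginx` directive looks like this (the value is an example; size it to your largest expected upload):
```nginx
server {
    # allow large uploads (e.g. videos) to be proxied through to Immich
    client_max_body_size 50000M;
}
```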
### Why are some photos stored in the file system with the wrong date?
There are a few different scenarios that can lead to this situation. The solution is to simply run the storage migration job again. The job is only _automatically_ run once per asset, after upload. If metadata extraction originally failed, the jobs were cleared/cancelled, etc., the job may not have run automatically the first time.
### How can I hide photos from the timeline?
You can _archive_ them.
### How can I backup data from Immich?
See [Backup and Restore](/docs/administration/backup-and-restore.md).
### Does Immich support reading existing face tag metadata?
No, it currently does not.
### Does Immich support filtering of NSFW images?
No, it currently does not, but there is an [open discussion about it on GitHub](https://github.com/immich-app/immich/discussions/2451). You can submit a pull request or vote for the discussion.
### Why are there so many thumbnail generation jobs?
There are three thumbnail jobs for each asset:
- Blurred (thumbhash)
- Small (webp)
- Large (jpeg)
Also, there are additional jobs for person (face) thumbnails.
### What happens if an asset exists in more than one account?
There are no requirements for assets to be unique across users. If multiple users upload the same image they are processed as if they were distinct assets and jobs run and thumbnails are generated accordingly.
### How can I delete transcoded videos without deleting the original?
The transcode of an asset can be deleted by setting a transcode policy that makes it unnecessary, then running a transcoding job for that asset. This can be done on a per-asset basis by starting a transcoding job for a single asset (with the _Refresh encoded videos_ button in the asset viewer options), or for all assets by running transcoding jobs for all assets.
To update the transcode policy, navigate to Administration > Video Transcoding Settings > Transcoding Policy and select a policy from the drop-down. This policy will determine whether an existing transcode will be deleted or overwritten in the transcoding job. If a video should be transcoded according to this policy, an existing transcode is overwritten. If not, then it is deleted.
:::note
For example, say you have existing transcodes with the policy "Videos higher than normal resolution or not in the desired format" and switch to a narrower policy: "Videos not in the desired format". If an asset was only transcoded due to its resolution, then running a transcoding job for it will now delete the existing transcode. This is because resolution is no longer part of the transcode policy and the transcode is unnecessary as a result. Likewise, if you set the policy to "Don't transcode any videos" and run transcoding jobs for all assets, this will delete all existing transcodes as they are all unnecessary.
:::
### Is it possible to compress images during backup?
No. Our golden rule is that the original assets should always be untouched, so we don't think this feature is a good fit for Immich.
### How can I move all data (photos, persons, albums) from one user to another?
This is not officially supported but can be accomplished with some database updates. You can do this on the command line (in the PostgreSQL container using the `psql` command), or you can add, for example, an [Adminer](https://www.adminer.org/) container to the `docker-compose.yml` file so that you can use a web interface.
:::warning
This is an advanced operation. If you can't do it with the steps described here, this is not for you.
:::
<details>
<summary>Steps</summary>
1. **MAKE A BACKUP** - See [backup and restore](/docs/administration/backup-and-restore.md).
2. Find the IDs of both the 'source' and the 'destination' user (the `id` column in the `users` table).
3. Three tables need to be updated:
```sql
-- reassign albums
UPDATE albums SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign people
UPDATE person SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>';
-- reassign assets (skipping any the destination user already has)
UPDATE assets SET "ownerId" = '<destinationId>' WHERE "ownerId" = '<sourceId>'
AND checksum NOT IN (SELECT checksum FROM assets WHERE "ownerId" = '<destinationId>');
```
4. There might be leftover assets in the 'source' user's library if they were skipped by the last query because of duplicate checksums. These are probably duplicates anyway and can likely be removed.
</details>
---
## Albums
### Can I keep my existing album structure while importing assets into Immich?
Yes. You can, by using the [Immich CLI](/docs/features/command-line-interface) with the `--album` flag.
### Is there a way to reorder photos within an album?
No, not yet. For updates on this planned feature, follow the [GitHub discussion](https://github.com/immich-app/immich/discussions/1689).
---
## External Library
### Can I add an external library while keeping the existing albums structure?
There is currently no official mechanism to create albums from external libraries, but there are some [workarounds from the community](https://github.com/immich-app/immich/discussions/4279) that can help you achieve that.
### What happens to duplicates in external libraries?
Duplicate checking only exists for upload libraries, using the file hash. Furthermore, duplicate checking is not global, but _per library_. Therefore, a situation where the same file appears twice in the timeline is possible, especially for external libraries.
---
## Machine Learning
### How does smart search work?
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
### How does facial recognition work?
For face detection and recognition, Immich uses [InsightFace models](https://github.com/deepinsight/insightface/tree/master/model_zoo).
### How can I disable machine learning?
:::info
Disabling machine learning will result in a poor experience for searching and the 'Explore' page, as these are reliant on it to work as intended.
:::
Machine learning can be disabled under Administration > Settings > Machine Learning Settings, either entirely or by model type. For instance, you can choose to disable smart search with CLIP, but keep facial recognition enabled. This means that the machine learning service will only process the enabled jobs.
However, disabling all jobs will not disable the machine learning service itself. To prevent it from starting up at all in this case, you can comment out the `immich-machine-learning` section of the docker-compose.yml.
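As a sketch, disabling the service just means commenting out its section in `docker-compose.yml` (service and image names here follow the default compose file; yours may differ):
```yaml
# immich-machine-learning:
#   container_name: immich_machine_learning
#   image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
#   volumes:
#     - model-cache:/cache
```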
### I'm getting errors about models being corrupt or failing to download. What do I do?
You can delete the model cache volume, which is where models are downloaded to. This will give the service a clean environment to download the model again.
### Why did Immich decide to remove object detection?
The feature added keywords to images for metadata search, but wasn't used for smart search. Smart search made it unnecessary, as it isn't limited to exact keywords. Combined with the fact that it caused crashes on some devices, used many dependencies, and caused user confusion as to how search worked, it was better to remove the job altogether.
For more info, see [here](https://github.com/immich-app/immich/pull/5903).
### Can I use a custom CLIP model?
No, this is not supported. Only models listed on the [Immich Hugging Face page](https://huggingface.co/immich-app) are compatible. Feel free to make a feature request if there's a model not listed there that you think should be added.
### I want to be able to search in other languages besides English. How can I do that?
You can change to a multilingual model listed [here](https://huggingface.co/collections/immich-app/multilingual-clip-654eb08c2382f591eeb8c2a7) by going to Administration > Machine Learning Settings > Smart Search and replacing the name of the model. Be sure to re-run Smart Search on all assets after this change. You can then search in over 100 languages.
:::note
Feel free to make a feature request if there's a model you want to use that isn't in [Immich Huggingface list](https://huggingface.co/immich-app).
:::
### Does Immich support Facial Recognition for videos?
This is not currently implemented, but may be in the future.
On the other hand, Immich does scan video thumbnails for faces, so it can perform recognition if the face is clear in the video thumbnail.
### Does Immich have animal recognition?
No.
### The immich_model-cache volume takes up a lot of space. What could be the problem?
If you installed several models and chose not to use some of them, it might be worth deleting the old models that are in immich_model-cache.
To do this you can run:
- `docker run -it --rm -v immich_model-cache:/mnt ubuntu bash`
- `cd /mnt`
- `ls`
- and delete unused models with `rm -r <model_name>`.
---
## Performance
### Why is Immich slow on low-memory systems like the Raspberry Pi?
Immich optionally uses machine learning for several features. However, it can be too heavy to run on a Raspberry Pi. You can [mitigate](/docs/FAQ#can-i-lower-cpu-and-ram-usage) this, host Immich's machine-learning container on a [more powerful system](/docs/guides/remote-machine-learning), or [disable](/docs/FAQ#how-can-i-disable-machine-learning) machine learning entirely.
### Can I lower CPU and RAM usage?
The initial backup is the most intensive due to the number of jobs running. The most CPU-intensive ones are transcoding and machine learning jobs (Smart Search, Face Detection), and to a lesser extent thumbnail generation. Here are some ways to lower their CPU usage:
- Lower the job concurrency for these jobs to 1.
- Under Settings > Transcoding Settings > Threads, set the number of threads to a low number like 1 or 2.
- Under Settings > Machine Learning Settings > Facial Recognition > Model Name, you can change the facial recognition model to `buffalo_s` instead of `buffalo_l`. The former is a smaller and faster model, albeit not as good.
- You _must_ re-run the Face Detection job for all images after this for facial recognition on new images to work properly.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for how you can disable machine learning.
### Can I limit the amount of CPU and RAM usage?
By default, a container has no resource constraints and can use as much of a given resource as the host's kernel scheduler allows.
You can look at the [original docker docs](https://docs.docker.com/config/containers/resource_constraints/) or use this [guide](https://www.baeldung.com/ops/docker-memory-limit) to learn how to do this.
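As a sketch, compose-level limits can look like this (service name and values are examples; tune them to your hardware):
```yaml
  immich-microservices:
    # hard caps enforced by the container runtime
    cpus: 2
    mem_limit: 2g
```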
### How can I boost machine learning speed?
:::note
This advice improves throughput, not latency. This is to say that it will make Smart Search jobs process more quickly, but it won't make searching faster.
:::
You can increase throughput by increasing the job concurrency for machine learning jobs (Smart Search, Face Detection). With higher concurrency, the host will work on more assets in parallel. You can do this by navigating to Administration > Settings > Job Settings and increasing concurrency as needed.
:::danger
On a normal machine, 2 or 3 concurrent jobs can probably max out the CPU, so you likely won't gain anything by raising the concurrency to, say, 30 jobs.
Note that storage speed and latency may quickly become the limiting factor, particularly when using HDDs.
Do not exaggerate the number of jobs, because you're probably just thoroughly overloading the server.
More info [here](https://discord.com/channels/979116623879368755/994044917355663450/1174711719994605708).
:::
### Why is Immich using so much of my CPU?
When a large number of assets are uploaded to Immich, it is expected that the CPU and RAM will be heavily used for machine learning work and thumbnail generation. After that, CPU usage should drop to around 3-5%.
---
## Docker
### How can I see Immich logs?
Most Immich components are typically deployed using docker. To see logs for deployed docker containers, you can use the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/), specifically the `docker logs` command. For examples, see [Docker Help](/docs/guides/docker-help.md).
### How can I run Immich as a non-root user?
1. Set the `PUID`/`PGID` environment variables (in `.env`).
2. Set the corresponding `user` argument in `docker-compose` for each service.
3. Add an additional volume to `immich-microservices` that mounts internally to `/usr/src/app/.reverse-geocoding-dump`.
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
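For reference, a minimal sketch of steps 2 and 3 in `docker-compose.yml` (the volume name is a placeholder):
```yaml
  immich-microservices:
    # run as the non-root user/group defined in .env
    user: ${PUID}:${PGID}
    volumes:
      - geocoding-dump:/usr/src/app/.reverse-geocoding-dump
```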
### How can I **purge** data from Immich?
Data for Immich comes in two forms:
1. **Metadata** stored in a postgres database, persisted via the `pg_data` volume
2. **Files** (originals, thumbs, profile, etc.), stored in the `UPLOAD_LOCATION` folder.
To remove the **Metadata** you can stop Immich and delete the volume.
```bash title="Remove Immich (containers and volumes)"
docker compose down -v
```
After removing the containers and volumes, the **Files** can be cleaned up (if necessary) from the `UPLOAD_LOCATION` by simply deleting any unwanted files or folders.
### Why does the machine learning service report workers crashing?
:::note
If the error says the worker is exiting, then this is normal. This is a feature intended to reduce RAM consumption when the service isn't being used.
:::
There are a few reasons why this can happen.
If the error mentions SIGKILL or error code 137, it most likely means the service is running out of memory. Consider either increasing the server's RAM or moving the service to a server with more RAM.
If it mentions SIGILL (note the lack of a K) or error code 132, it most likely means your server's CPU is incompatible. This is unlikely to occur on version 1.92.0 or later. Consider upgrading if your version of Immich is below that.
If your version of Immich is below 1.92.0 and the crash occurs after logs about tracing or exporting a model, consider either upgrading or disabling the Tag Objects job.
View File
@@ -1,7 +1,5 @@
# Backup and Restore
A [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/) is recommended to protect your data. You should keep copies of your uploaded photos/videos as well as the Immich database for a comprehensive backup solution. This page provides an overview of how to back up the database and the location of user-uploaded pictures and videos. A template bash script that can be run as a cron job is provided [here](/docs/guides/template-backup-script.md).
## Database
:::caution
@@ -66,34 +64,3 @@ Immich stores two types of content in the filesystem: (1) original, unmodified c
1. `UPLOAD_LOCATION/library`
1. `UPLOAD_LOCATION/upload`
1. `UPLOAD_LOCATION/profile`
**1. User-Specific Folders:**
- Each user has a unique string identifier.
- The main user is "Admin" (but only for `/library/library/`).
- Other users have different string identifiers.
- You can find your user ID in Account Settings > Account > User ID.
**2. Asset Types and Storage Locations:**
- **Source Assets:**
  - Original assets uploaded through the browser interface, mobile app, and CLI.
  - Stored in `/library/library/<userID>`.
- **Avatar Images:**
  - User profile images.
  - Stored in `/library/profile/<userID>`.
- **Thumbs Images:**
  - Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
  - Stored in `/library/thumbs/<userID>`.
- **Encoded Assets:**
  - Video assets re-encoded for wider compatibility (by default, unless otherwise specified).
  - Stored in `/library/encoded-video/<userID>`.
- **Files in Upload Queue (Mobile):**
  - Files uploaded through mobile apps.
  - Temporarily located in `/library/upload/<userID>`.
  - Transferred to `/library/library/<userID>` upon successful upload.
:::danger
Do not touch the files inside these folders under any circumstances except to take a backup. Changing or removing an asset can cause untracked and missing files.
You can think of these as the folders-which-must-not-be-touched: the only way to view, change, or delete assets is through the mobile or web interface.
:::
View File
@@ -1,4 +1,4 @@
import StorageTemplate from '/docs/partials/_storage-template.md';
import StorageTemplate from '../partials/_storage-template.md';
# Storage Template
View File
@@ -1,5 +1,5 @@
import RegisterAdminUser from '/docs/partials/_register-admin.md';
import UserCreate from '/docs/partials/_user-create.md';
import RegisterAdminUser from '../partials/_register-admin.md';
import UserCreate from '../partials/_user-create.md';
# User Management
View File
@@ -107,4 +107,4 @@ See [Database Migrations](./database-migrations.md) for more information about h
### Redis
Immich uses [Redis](https://redis.com/) via [BullMQ](https://docs.bullmq.io/) to manage job queues. Some jobs trigger subsequent jobs. For example, Smart Search and Facial Recognition rely on thumbnail generation and automatically run after one is generated.
Immich uses [Redis](https://redis.com/) via [BullMQ](https://docs.bullmq.io/) to manage job queues. Some jobs trigger subsequent jobs. For example, object detection relies on thumbnail generation and automatically runs after one is generated.
View File
@@ -1,6 +1,6 @@
# Database Migrations
After making any changes in the `server/src/infra/entities`, a database migration needs to be run in order to register the changes in the database. Follow the steps below to create a new migration.
After making any changes in the `server/src/infra/database/entities`, a database migration needs to be run in order to register the changes in the database. Follow the steps below to create a new migration.
1. Run the command
View File
@@ -13,5 +13,5 @@ npm run api:generate # Run from the `server/` directory
You can find the generated client SDKs in `web/src/api` (TypeScript) and `mobile/openapi` (Dart).
:::tip
This can also be run via `make open-api` from the project root directory (not in the `server` folder)
This can also be run via `make api` from the project root directory (not in the `server` folder)
:::
View File
@@ -6,7 +6,7 @@ When contributing code through a pull request, please check the following:
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check:svelte` (Type checking via SvelteKit)
- [ ] `npm run check` (Type checking via SvelteKit)
- [ ] `npm test` (Tests via Jest)
:::tip
View File
@@ -4,16 +4,6 @@ sidebar_position: 2
# Setup
:::note
If there's a feature you're planning to work on, just give us a heads up in [Discord](https://discord.com/channels/979116623879368755/1071165397228855327) so we can:
1. Let you know if it's something we would accept into Immich
2. Provide any guidance on how something like that would ideally be implemented
3. Ensure nobody is already working on that issue/feature so we don't duplicate effort
Thanks for being interested in contributing 😊
:::
## Environment
### Server and web app
@@ -49,7 +39,7 @@ You can access the web from `http://your-machine-ip:2283` or `http://localhost:2
### Mobile app
The mobile app (`/mobile`) requires the Flutter toolchain 3.13.x to be installed on your system.
The mobile app (`/mobile`) requires the Flutter toolchain to be installed on your system.
Please refer to the [Flutter's official documentation](https://flutter.dev/docs/get-started/install) for more information on setting up the toolchain on your machine.
View File
@@ -10,8 +10,8 @@ Unit tests are run by calling `npm run test` from the `server` directory.
The backend has an end-to-end test suite that can be called with `npm run test:e2e` from the `server` directory. This will set up a dummy database inside a temporary container and run the tests against it. Setup and teardown are automatically taken care of. That test, however, cannot set up all prerequisites to parse file formats, as that is very complex and error-prone. As such, this test excludes some test cases like HEIC file imports. The test suite will also print a friendly warning to remind you that not all tests are being run.
Note that there is a bug in nodejs \<20.8 that causes segmentation faults when running these tests. If you run into segfaults, ensure you are using at least version 20.8.
Note that there is a bug in nodejs <20.8 that causes segmentation faults when running these tests. If you run into segfaults, ensure you are using at least version 20.8.
To perform a full e2e test, you need to run e2e tests inside docker. The easiest way to do that is to run `make test-e2e` in the root directory. This will build and start a docker-compose consisting of the server, microservices, and a postgres database. It will then perform the tests and exit.
If you manually install the dependencies (see the DOCKERFILE) on your development machine, you can also run the full e2e tests manually by setting the `IMMICH_RUN_ALL_TESTS` environment value to true, i.e. `IMMICH_RUN_ALL_TESTS=true npm run e2e:jobs`.
If you manually install the dependencies (see the DOCKERFILE) on your development machine, you can also run the full e2e tests manually by setting the `IMMICH_RUN_ALL_TESTS` environment value to true, i.e. `IMMICH_RUN_ALL_TESTS=true npm run test:e2e`.
View File
@@ -25,6 +25,6 @@ Additional actions you can do with a detected person are:
- Merge two or more detected faces into one person
- Hide face
It can be found from the app bar when you access the detail view of a person.
It can be found from the app bar when you access the detial view of a person
<img src={require('./img/facial-recognition-4.png').default} title='Facial Recognition 4' width="70%"/>
View File
@@ -43,28 +43,12 @@ As this is a new feature, it is still experimental and may not work on all syste
## Setup
#### Initial Setup
1. If you do not already have it, download the latest [`hwaccel.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. Uncomment the lines that apply to your system and desired usage.
3. In the `docker-compose.yml` under `immich-microservices`, uncomment the lines relating to the `hwaccel.yml` file.
4. Redeploy the `immich-microservices` container with these updated settings.
5. In the Admin page under `FFmpeg settings`, change the hardware acceleration setting to the appropriate option and save.
#### All-In-One - Unraid Setup
##### NVENC - NVIDIA GPUs
- If you are using other backends, you will still need to add the [`hwaccel.yml`][hw-file] file to the `immich-microservices` service directly; please see the "Initial Setup" section above on how to do that.
- As of v1.92.0, steps 1 and 2 are no longer necessary. If your version of Immich is below that or missing the environment variables, please follow these steps. Otherwise, skip to step 3.
- Please note that `NVIDIA_DRIVER_CAPABILITIES` no longer needs to be entered as a variable.
1. Assuming you already have the Nvidia Driver Plugin installed on your Unraid server, confirm that your Nvidia GPU is showing up with its GPU ID in the Nvidia Driver Plugin. The ID will be `GPU-LONG_STRING_OF_CHARACTERS`. Copy the GPU ID.
2. In the Imagegenius/Immich Docker Container app, add two new variables: Key=`NVIDIA_VISIBLE_DEVICES` Value=`GPU-LONG_STRING_OF_CHARACTERS` and Key=`NVIDIA_DRIVER_CAPABILITIES` Value=`all`
3. While you are in the docker container app, change the Container from Basic Mode to Advanced Mode and add the following parameter to the Extra Parameters field: `--runtime=nvidia`
4. Restart the Imagegenius/Immich Docker Container app.
5. In the Admin page under FFmpeg settings, change the hardware acceleration setting to the appropriate option and save.
## Tips
- You may want to choose a slower preset than you would for software transcoding to maintain quality and efficiency
View File
@@ -34,11 +34,9 @@ If you add assets from an external library to an album and then move the asset t
### Deleted External Assets
Note: Either a manual or scheduled library scan must have been performed to identify offline assets before this process will work.
In all above scan methods, Immich will check if any files are missing. This can happen if files are deleted, or if they are on a storage location that is currently unavailable, like a network drive that is not mounted, or a USB drive that has been unplugged. In order to prevent accidental deletion of assets, Immich will not immediately delete an asset from the library if the file is missing. Instead, the asset will be internally marked as offline and will still be visible in the main timeline. If the file is moved back to its original location and the library is scanned again, the asset will be restored.
Finally, files can be deleted from Immich via the `Remove Offline Files` job. This job can be found by the three dots menu for the associated external storage that was configured under user account settings > libraries (the same location described at [create external libraries](#create-external-libraries)). When this job is run, any assets marked as offline will then be removed from Immich. Run this job whenever files have been deleted from the file system and you want to remove them from Immich.
Finally, files can be deleted from Immich via the `Remove Offline Files` job. Any assets marked as offline will then be removed from Immich. Run this job whenever files have been deleted from the file system and you want to remove them from Immich. Note that a library scan must be performed first to mark the assets as offline.
### Import Paths
@@ -53,7 +51,6 @@ Sometimes, an external library will not scan correctly. This can happen if the i
- Are the volumes identical between the `server` and `microservices` container?
- Are the import paths set correctly, and do they match the path set in docker-compose file?
- Are the permissions set correctly?
- Are you using forward slashes everywhere? (`/`)
If all else fails, you can always start a shell inside the container and check if the path is accessible. For example, `docker exec -it immich_microservices /bin/bash` will start a bash shell. If your import path, for instance, is `/data/import/photos`, you can check if the files are accessible by running `ls /data/import/photos`. Also check the `immich_server` container in the same way.
@@ -105,7 +102,6 @@ First, we need to plan how we want to organize the libraries. The christmas trip
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+ - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path in Windows system.
immich-microservices:
@@ -114,7 +110,6 @@ First, we need to plan how we want to organize the libraries. The christmas trip
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+ - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path in Windows system.
```
:::tip
@@ -130,14 +125,6 @@ Only an admin can do this.
- Navigate to `Administration > Users` page on the web.
- Click on the user edit button.
- Set `/mnt/media` to be the external path. This folder will only contain the three folders that we want to import, so nothing else can be accessed.
:::note
Spaces in the internal path aren't currently supported.
You must import it as:
`..:/mnt/media/my-media:ro`
instead of
`..:/mnt/media/my media:ro`
:::
### Create External Libraries
View File
@@ -1,6 +1,6 @@
import MobileAppDownload from '/docs/partials/_mobile-app-download.md';
import MobileAppLogin from '/docs/partials/_mobile-app-login.md';
import MobileAppBackup from '/docs/partials/_mobile-app-backup.md';
import MobileAppDownload from '../partials/_mobile-app-download.md';
import MobileAppLogin from '../partials/_mobile-app-login.md';
import MobileAppBackup from '../partials/_mobile-app-backup.md';
# Mobile App
View File
@@ -6,8 +6,6 @@ Smart search is powered by the [pgvecto.rs](https://github.com/tensorchord/pgvec
Metadata search (prefixed with `m:`) can search specifically by text without the use of a model.
Archived photos are not included in search results by default. To include them, add the query parameter `withArchived=true` to the url.
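For illustration, a hypothetical URL with the parameter added (host and the rest of the query are placeholders):
```
https://immich.example.com/search?query=beach&withArchived=true
```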
Some search examples:
<img src={require('./img/search-ex-2.webp').default} title='Search Example 1' />
Binary file not shown (before: 39 KiB)

View File
@@ -1,40 +0,0 @@
# Database GUI
A short guide on connecting [pgAdmin](https://www.pgadmin.org/) to Immich.
:::note
- In order to connect to the database, the `immich_postgres` container **must be running**.
- The passwords and usernames used below match the ones specified in the example `.env` file. If changed, please use the actual values instead.
:::
## 1. Install pgAdmin
Download and install [pgAdmin](https://www.pgadmin.org/download/) following the official documentation.
## 2. Add a Server
Open pgAdmin and click "Add New Server".
<img src={require('./img/add-new-server-option.png').default} width="50%" title="new server option" />
## 3. Enter Connection Details
| Name | Value |
| -------------------- | ----------- |
| Host name/address | `localhost` |
| Port | `5432` |
| Maintenance database | `immich` |
| Username | `postgres` |
| Password | `postgres` |
<img src={require('./img/Connection-Pgadmin.png').default} width="75%" title="Connection" />
## 4. Save Connection
Click on "Save" to connect to the Immich database.
:::tip
View [Database Queries](https://immich.app/docs/guides/database-queries/) for common database queries.
:::
View File
@@ -1,6 +1,6 @@
# External Library
This guide walks you through adding an [External Library](/docs/features/libraries#external-libraries).
This guide walks you through adding an [External Library](../features/libraries#external-libraries).
This guide assumes you are running Immich in Docker and that the files you wish to access are stored
in a directory on the same machine.
@@ -78,7 +78,7 @@ In the Immich web UI:
- Click **Add path**
<img src={require('./img/add-path-button.png').default} width="50%" title="Add Path button" />
- Enter **/usr/src/app/external** as the path and click Add
- Enter **.** as the path and click Add
<img src={require('./img/add-path-field.png').default} width="50%" title="Add Path field" />
- Save the new path

Binary file not shown (before: 36 KiB)
Binary file not shown (before: 39 KiB)
Binary file not shown (before: 27 KiB, after: 36 KiB)
Binary file not shown (before: 9.5 KiB, after: 11 KiB)

View File
@@ -1,6 +1,6 @@
# Remote Machine Learning
To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine-learning container on a more powerful system (e.g. your laptop or desktop computer):
To alleviate [performance issues on low-memory systems](/docs/FAQ.md#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine-learning container on a more powerful system (e.g. your laptop or desktop computer):
- Set the URL in Machine Learning Settings on the Admin Settings page to point to the designated ML system, e.g. `http://workstation:3003`.
- Copy the following `docker-compose.yml` to your ML system.
View File
@@ -1,7 +1,6 @@
# Remote Access
This page gives a few pointers on how to access your Immich instance from outside your LAN.
You can read the [full discussion on Discord](https://discord.com/channels/979116623879368755/1122615710846308484).
:::danger
Never forward port 2283 directly to the internet without additional configuration. This will expose the web interface via HTTP to the internet, making you susceptible to [man in the middle](https://en.wikipedia.org/wiki/Man-in-the-middle_attack) attacks.
@@ -42,7 +41,7 @@ If you are unable to open a port on your router for Wireguard or OpenVPN to your
A reverse proxy is a service that sits between web servers and clients. A reverse proxy can either be hosted on the server itself or remotely. Clients can connect to the reverse proxy via HTTPS, and the proxy relays data to Immich. This setup makes the most sense if you have your own domain and want to access your Immich instance just like any other website, from outside your LAN. You can also use a DDNS provider like DuckDNS or No-IP if you don't have a domain. This configuration allows the Immich Android and iPhone apps to connect to your server without a VPN or Tailscale app on the client side.
If you're hosting your own reverse proxy, [Nginx](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/) is a great option. An example configuration for Nginx is provided [here](/docs/administration/reverse-proxy.md).
If you're hosting your own reverse proxy, [Nginx](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/) is a great option. An example configuration for Nginx is provided [here](https://immich.app/docs/administration/reverse-proxy).
You'll also need your own certificate to authenticate HTTPS connections. If you're making Immich publicly accessible, [Let's Encrypt](https://letsencrypt.org/) can provide a free certificate for your domain and is the recommended option. Alternatively, a [self-signed certificate](https://en.wikipedia.org/wiki/Self-signed_certificate) allows you to encrypt your connection to Immich, but it raises a security warning on the client's browser.
View File
@@ -1,38 +0,0 @@
# Remove Offline Files
:::note
**Before running the script**, please make sure you have a [backup](/docs/administration/backup-and-restore) of your assets and database.
:::
This page is a guide to getting rid of offline files listed on the repair page.
It works by downloading a JSON file that contains a list of all the files flagged as offline, then running a script that uses the [Immich API](/docs/api/delete-assets) to remove those offline files.
1. Create an API key under Admin User -> Account Settings -> API Keys -> New API Key -> Copy to clipboard.
2. Download the JSON file under Administration -> repair -> Export.
3. Replace `YOUR_IP_HERE` and `YOUR_API_KEY_HERE` with your actual IP address and API key in the script.
4. Run the script in the same folder where the JSON file is located.
## Script for Linux based systems:
```bash
awk -F\" '/entityId/ {print $4}' orphans.json | while read line; do
  curl --location --request DELETE 'http://YOUR_IP_HERE:2283/api/asset' \
    --header 'Content-Type: application/json' \
    --header 'x-api-key: YOUR_API_KEY_HERE' \
    --data '{ "force": true, "ids": ["'"$line"'"]}'
done
```
## Script for the Windows system (run through PowerShell):
```powershell
Get-Content orphans.json | Select-String -Pattern 'entityId' | ForEach-Object {
    $line = $_ -split '"' | Select-Object -Index 3
    $body = [pscustomobject]@{
        'ids'   = @($line)
        'force' = $true
    } | ConvertTo-Json -Depth 3
    Invoke-RestMethod -Uri 'http://YOUR_IP_HERE:2283/api/asset' -Method Delete -Headers @{
        'Content-Type' = 'application/json'
        'x-api-key'    = 'YOUR_API_KEY_HERE'
    } -Body $body
}
```
Thanks to [DooMRunneR](https://discord.com/channels/979116623879368755/1179655214870040596/1194308198413373482) for writing this script.
View File
@@ -1,82 +0,0 @@
# Backup Script
[Borg](https://www.borgbackup.org/) is a feature-rich deduplicating archiving software with built-in versioning. We provide a template bash script that can be run daily/weekly as a [cron](https://wiki.archlinux.org/title/cron) job to back up your files and database. We encourage you to read the quick-start guide for Borg before running this script.
This script assumes you have a second hard drive connected to your server for on-site backup and ssh access to a remote machine for your third off-site copy. [BorgBase](https://www.borgbase.com/) is an alternative option for off-site backups with a competitive pricing structure. You may choose to skip off-site backups entirely by removing the relevant lines from the template script.
The database is saved to your Immich upload folder in the `database-backup` subdirectory. The database is then backed up and versioned with your assets by Borg. This ensures that the database backup is in sync with your assets in every snapshot.
### Prerequisites
- Borg needs to be installed on your server as well as the remote machine. You can find instructions to install Borg [here](https://borgbackup.readthedocs.io/en/latest/installation.html).
- To run this script as a non-root user, you should [add your username to the docker group](https://docs.docker.com/engine/install/linux-postinstall/).
- To run this script non-interactively, set up [passwordless ssh](https://www.redhat.com/sysadmin/passwordless-ssh) to your remote machine from your server.
To initialize the borg repository, run the following commands once.
```bash title='Borg set-up'
UPLOAD_LOCATION="/path/to/immich/directory" # Immich upload location, as set in your .env file
BACKUP_PATH="/path/to/local/backup/directory"
mkdir "$UPLOAD_LOCATION/database-backup"
mkdir "$BACKUP_PATH/immich-borg"
borg init --encryption=none "$BACKUP_PATH/immich-borg"
## Remote set up
REMOTE_HOST="remote_host@IP"
REMOTE_BACKUP_PATH="/path/to/remote/backup/directory"
ssh "$REMOTE_HOST" "mkdir $REMOTE_BACKUP_PATH/immich-borg"
ssh "$REMOTE_HOST" "borg init --encryption=none $REMOTE_BACKUP_PATH/immich-borg"
```
Edit the following script as necessary and add it to your crontab (a sample crontab entry is sketched after the script). Note that this script assumes there are no spaces in your paths; if there are spaces, enclose the paths in double quotes.
```bash title='Borg backup template'
#!/bin/sh
# Paths
UPLOAD_LOCATION="/path/to/immich/directory"
BACKUP_PATH="/path/to/local/backup/directory"
REMOTE_HOST="remote_host@IP"
REMOTE_BACKUP_PATH="/path/to/remote/backup/directory"
### Local
# Backup Immich database
docker exec -t immich_postgres pg_dumpall -c -U postgres | /usr/bin/gzip > $UPLOAD_LOCATION/database-backup/immich-database.sql.gz
### Append to local Borg repository
borg create $BACKUP_PATH/immich-borg::{now} $UPLOAD_LOCATION --exclude $UPLOAD_LOCATION/thumbs/ --exclude $UPLOAD_LOCATION/encoded-video/
borg prune --keep-weekly=4 --keep-monthly=3 $BACKUP_PATH/immich-borg
borg compact $BACKUP_PATH/immich-borg
### Append to remote Borg repository
borg create $REMOTE_HOST:$REMOTE_BACKUP_PATH/immich-borg::{now} $UPLOAD_LOCATION --exclude $UPLOAD_LOCATION/thumbs/ --exclude $UPLOAD_LOCATION/encoded-video/
borg prune --keep-weekly=4 --keep-monthly=3 $REMOTE_HOST:$REMOTE_BACKUP_PATH/immich-borg
borg compact $REMOTE_HOST:$REMOTE_BACKUP_PATH/immich-borg
```
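A crontab sketch for scheduling it (the script path and time are placeholders):
```bash title='Sample crontab entry'
# run the Borg backup script every day at 02:00
0 2 * * * /path/to/immich-borg-backup.sh
```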
### Restoring
To restore from a backup, use the `borg mount` command.
```bash title='Restore from local backup'
BACKUP_PATH="/path/to/local/backup/directory"
mkdir /tmp/immich-mountpoint
borg mount $BACKUP_PATH/immich-borg /tmp/immich-mountpoint
cd /tmp/immich-mountpoint
```
```bash title='Restore from remote backup'
REMOTE_HOST="remote_host@IP"
REMOTE_BACKUP_PATH="/path/to/remote/backup/directory"
mkdir /tmp/immich-mountpoint
borg mount $REMOTE_HOST:$REMOTE_BACKUP_PATH/immich-borg /tmp/immich-mountpoint
cd /tmp/immich-mountpoint
```
You can find available snapshots in separate sub-directories at `/tmp/immich-mountpoint`. Restore the files you need, and unmount the Borg repository using `borg umount /tmp/immich-mountpoint`.
View File
@@ -79,7 +79,7 @@ The default configuration looks like this:
"modelName": "buffalo_l",
"minScore": 0.7,
"maxDistance": 0.6,
"minFaces": 3
"minFaces": 1
}
},
"map": {
@@ -142,4 +142,4 @@ So you can just grab it from there, paste it into a file and you're pretty much
### Step 2 - Specify the file location
In your `.env` file, set the variable `IMMICH_CONFIG_FILE` to the path of your config.
For more information, refer to the [Environment Variables](/docs/install/environment-variables.md) section.
For more information, refer to the [Environment Variables](https://docs.immich.app/docs/install/environment-variables) section.
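For example, a sketch of the relevant `.env` line (the path is a placeholder for wherever your config file lives inside the container):
```bash title='.env'
IMMICH_CONFIG_FILE=/config/immich-config.json
```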
View File
@@ -30,15 +30,15 @@ These environment variables are used by the `docker-compose.yml` file and do **N
## General
| Variable | Description | Default | Services |
| :------------------------------ | :------------------------------------------- | :------------------: | :------------------------------------------- |
| `TZ` | Timezone | | microservices |
| `NODE_ENV` | Environment (production, development) | `production` | server, microservices, machine learning, web |
| `LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location | `./upload` | server, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server |
| `IMMICH_WEB_ROOT` | Path of root index.html | `/usr/src/app/www` | server |
| `IMMICH_REVERSE_GEOCODING_ROOT` | Path of reverse geocoding dump directory | `/usr/src/resources` | microservices |
| Variable | Description | Default | Services |
| :-------------------------- | :------------------------------------------- | :-----------------: | :------------------------------------------- |
| `TZ` | Timezone | | microservices |
| `NODE_ENV` | Environment (production, development) | `production` | server, microservices, machine learning, web |
| `LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location | `./upload` | server, microservices |
| `PUBLIC_LOGIN_PAGE_MESSAGE` | Public Login Page Message | | web |
| `IMMICH_CONFIG_FILE` | Path to config file | | server |
| `IMMICH_WEB_ROOT` | Path of root index.html | `/usr/src/app/www'` | server |
:::tip
@@ -48,6 +48,12 @@ These environment variables are used by the `docker-compose.yml` file and do **N
:::
## Geocoding
| Variable | Description | Default | Services |
| :--------------------------------- | :------------------------------- | :--------------------------: | :------------ |
| `REVERSE_GEOCODING_DUMP_DIRECTORY` | Reverse Geocoding Dump Directory | `./.reverse-geocoding-dump/` | microservices |
## Ports
| Variable | Description | Default | Services |
@@ -121,16 +127,16 @@ Redis (Sentinel) URL example JSON before encoding:
## Machine Learning
| Variable | Description | Default | Services |
| :----------------------------------------------- | :----------------------------------------------------------------- | :-----------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` | machine learning |
| Variable | Description | Default | Services |
| :----------------------------------------------- | :---------------------------------------------------------------- | :-----------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if <= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if <= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if <= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` | machine learning |
\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
View File
@@ -2,12 +2,12 @@
sidebar_position: 90
---
import RegisterAdminUser from '/docs/partials/_register-admin.md';
import UserCreate from '/docs/partials/_user-create.md';
import StorageTemplate from '/docs/partials/_storage-template.md';
import MobileAppDownload from '/docs/partials/_mobile-app-download.md';
import MobileAppLogin from '/docs/partials/_mobile-app-login.md';
import MobileAppBackup from '/docs/partials/_mobile-app-backup.md';
import RegisterAdminUser from '../partials/_register-admin.md';
import UserCreate from '../partials/_user-create.md';
import StorageTemplate from '../partials/_storage-template.md';
import MobileAppDownload from '../partials/_mobile-app-download.md';
import MobileAppLogin from '../partials/_mobile-app-login.md';
import MobileAppBackup from '../partials/_mobile-app-backup.md';
# Post Install Steps
View File
@@ -1,12 +0,0 @@
---
sidebar_position: 2
---
# Comparison
If you're new here and came from other self-hosted photo alternatives, you might want to look at a comparison between Immich and your current solution.
Here you can see a [comparison between the various OpenSource Photo Libraries](https://meichthys.github.io/foss_photo_libraries/) including Immich.
:::note
It is important to remember that Immich is under very active development. Expect bugs and changes. Do not use it as the only way to store your photos and videos!
:::
View File
@@ -1,12 +1,12 @@
---
sidebar_position: 6
sidebar_position: 5
---
# Help Me!
Running into an issue or have a question? Try the following:
1. Check the [FAQs](/docs/FAQ.mdx).
1. Check the [FAQs](/docs/FAQ.md).
2. Read through the [Release Notes][github-releases].
3. Search through existing [GitHub Issues][github-issues].
4. Open a help ticket on [Discord][discord-link].
View File
@@ -1,5 +1,5 @@
---
sidebar_position: 4
sidebar_position: 3
---
# Logo
View File
@@ -1,5 +1,5 @@
---
sidebar_position: 3
sidebar_position: 2
---
# Quick Start
@@ -10,11 +10,11 @@ to install and use it.
## Requirements
Check the [requirements page](/docs/install/requirements) to get started.
Check the [requirements page](../install/requirements) to get started.
## Install and launch via Docker Compose
Follow the [Docker Compose (Recommended)](/docs/install/docker-compose) instructions
Follow the [Docker Compose (Recommended)](../install/docker-compose) instructions
to install the server.
- Where random passwords are required, `pwgen` is a handy utility.
@@ -24,7 +24,7 @@ to install the server.
## Try the Web UI
import RegisterAdminUser from '/docs/partials/_register-admin.md';
import RegisterAdminUser from '../partials/_register-admin.md';
<RegisterAdminUser />
@@ -36,13 +36,13 @@ Try uploading a picture from your browser.
### Download the Mobile App
import MobileAppDownload from '/docs/partials/_mobile-app-download.md';
import MobileAppDownload from '../partials/_mobile-app-download.md';
<MobileAppDownload />
### Login to the Mobile App
import MobileAppLogin from '/docs/partials/_mobile-app-login.md';
import MobileAppLogin from '../partials/_mobile-app-login.md';
<MobileAppLogin />
@@ -50,7 +50,7 @@ In the mobile app, you should see the photo you uploaded from the web UI.
### Transfer Photos from your Mobile Device
import MobileAppBackup from '/docs/partials/_mobile-app-backup.md';
import MobileAppBackup from '../partials/_mobile-app-backup.md';
<MobileAppBackup />
@@ -59,13 +59,7 @@ take quite a while.
You can select the Jobs tab to see Immich processing your photos.
<img src={require('/docs/guides/img/jobs-tab.png').default} title="Jobs tab" />
## Set up your backups
You may want to back up the content of your Immich instance
along with other parts of your server; be sure to read about
[database backup](/docs/administration/backup-and-restore).
<img src={require('../guides/img/jobs-tab.png').default} title="Jobs tab" />
## Where to go from here?
@@ -77,11 +71,15 @@ even those not on your mobile device, via Google Takeout.
You can use [immich-go](https://github.com/simulot/immich-go) for this.
You may want to
[upload photos from your own archive](/docs/features/command-line-interface).
[upload photos from your own archive](../features/command-line-interface).
You may want to incorporate an immutable archive of photos from an
[External Library](/docs/features/libraries#external-libraries);
there's a [Guide](/docs/guides/external-library) for that.
[External Library](../features/libraries#external-libraries);
there's a [Guide](../guides/external-library) for that.
You may want your mobile device to
[back photos up to your server automatically](/docs/features/automatic-backup).
[back photos up to your server automatically](../features/automatic-backup).
You may want to back up the content of your Immich instance
along with other parts of your server; be sure to read about
[database backup](../administration/backup-and-restore).
View File
@@ -1,5 +1,5 @@
---
sidebar_position: 5
sidebar_position: 4
---
# Support The Project

View File
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
const prism = require('prism-react-renderer');
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
/** @type {import('@docusaurus/types').Config} */
const config = {
@@ -55,7 +56,7 @@ const config = {
editUrl: 'https://github.com/immich-app/immich/tree/main/docs/',
},
api: {
path: '../open-api/immich-openapi-specs.json',
path: '../server/immich-openapi-specs.json',
routeBasePath: '/docs/api',
},
// blog: {
@@ -164,8 +165,8 @@ const config = {
copyright: `Immich is available as open source under the terms of the MIT License.`,
},
prism: {
theme: prism.themes.github,
darkTheme: prism.themes.dracula,
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
additionalLanguages: ['sql'],
},
image: 'overview/img/feature-panel.png',
docs/package-lock.json (generated): file diff suppressed because it is too large (19,479 lines)

View File
@@ -13,31 +13,32 @@
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids"
"write-heading-ids": "docusaurus write-heading-ids",
"check": "tsc"
},
"dependencies": {
"@docusaurus/core": "^3.1.0",
"@docusaurus/preset-classic": "^3.1.0",
"@docusaurus/core": "^2.4.3",
"@docusaurus/preset-classic": "^2.4.3",
"@mdi/js": "^7.3.67",
"@mdi/react": "^1.6.1",
"@mdx-js/react": "^3.0.0",
"autoprefixer": "^10.4.17",
"@mdx-js/react": "^1.6.22",
"autoprefixer": "^10.4.13",
"classnames": "^2.3.2",
"clsx": "^2.0.0",
"docusaurus-lunr-search": "^3.3.2",
"docusaurus-preset-openapi": "^0.7.3",
"docusaurus-lunr-search": "^2.3.2",
"docusaurus-preset-openapi": "^0.6.3",
"postcss": "^8.4.25",
"prism-react-renderer": "^2.3.1",
"prism-react-renderer": "^1.3.5",
"raw-loader": "^4.0.2",
"react": "^18.0.0",
"react-dom": "^18.0.0",
"react": "^17.0.2",
"react-dom": "^17.0.2",
"tailwindcss": "^3.2.4",
"url": "^0.11.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "^3.1.0",
"@tsconfig/docusaurus": "^2.0.2",
"prettier": "^3.2.4",
"@docusaurus/module-type-aliases": "^2.4.1",
"@tsconfig/docusaurus": "^1.0.5",
"prettier": "^3.0.0",
"typescript": "^5.1.6"
},
"browserslist": {
View File
@@ -4,6 +4,6 @@
"compilerOptions": {
"baseUrl": ".",
"module": "Node16",
},
"module": "Node16"
}
}
View File
@@ -1,4 +1,4 @@
FROM python:3.11-bookworm@sha256:497c00ec2cff14316a6859c4e30fc88e7ab1f11dd254fb43b8f4b201ca657596 as builder
FROM python:3.11-bookworm@sha256:ba7a7ac30c38e119c4304f98ef0e188f90f4f67a958bb6899da9defb99bfb471 as builder
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
@@ -13,40 +13,17 @@ ENV VIRTUAL_ENV="/opt/venv" PATH="/opt/venv/bin:${PATH}"
COPY poetry.lock pyproject.toml ./
RUN poetry install --sync --no-interaction --no-ansi --no-root --only main
FROM python:3.11-slim-bookworm@sha256:cfd7ed5c11a88ce533d69a1da2fd932d647f9eb6791c5b4ddce081aedf7f7876
ARG TARGETPLATFORM
ENV ARMNN_PATH=/opt/armnn
COPY ann /opt/ann
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
mkdir /opt/armnn && \
curl -SL "https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-linux-aarch64.tar.gz" | tar -zx -C /opt/armnn && \
cd /opt/ann && \
sh build.sh; \
else \
mkdir /opt/armnn; \
fi
FROM python:3.11-slim-bookworm@sha256:637774748f62b832dc11e7b286e48cd716727ed04b45a0322776c01bc526afc3
ARG TARGETPLATFORM
RUN apt-get update && apt-get install -y --no-install-recommends tini libmimalloc2.0 && rm -rf /var/lib/apt/lists/*
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
apt-get update && apt-get install -y --no-install-recommends ocl-icd-libopencl1 mesa-opencl-icd && \
rm -rf /var/lib/apt/lists/* && \
mkdir --parents /etc/OpenCL/vendors && \
echo "/usr/lib/libmali.so" > /etc/OpenCL/vendors/mali.icd && \
mkdir /opt/armnn; \
fi
WORKDIR /usr/src/app
ENV NODE_ENV=production \
TRANSFORMERS_CACHE=/cache \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PATH="/opt/venv/bin:$PATH" \
PYTHONPATH=/usr/src \
LD_LIBRARY_PATH=/opt/armnn
PYTHONPATH=/usr/src
# prevent core dumps
RUN echo "hard core 0" >> /etc/security/limits.conf && \
@@ -54,10 +31,7 @@ RUN echo "hard core 0" >> /etc/security/limits.conf && \
echo 'ulimit -S -c 0 > /dev/null 2>&1' >> /etc/profile
COPY --from=builder /opt/venv /opt/venv
COPY --from=builder /opt/armnn/libarmnn.so.?? /opt/armnn/libarmnnOnnxParser.so.?? /opt/armnn/libarmnnDeserializer.so.?? /opt/armnn/libarmnnTfLiteParser.so.?? /opt/armnn/libprotobuf.so.?.??.?.? /opt/ann/libann.s[o] /opt/ann/build.sh /opt/armnn
COPY ann/ann.py /usr/src/ann/ann.py
COPY start.sh log_conf.json ./
COPY app .
ENTRYPOINT ["tini", "--"]
CMD ["./start.sh"]
View File
@@ -1 +0,0 @@
from .ann import Ann, is_available
View File
@@ -1,281 +0,0 @@
#include <fstream>
#include <mutex>
#include <atomic>
#include "armnn/IRuntime.hpp"
#include "armnn/INetwork.hpp"
#include "armnn/Types.hpp"
#include "armnnDeserializer/IDeserializer.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "armnnOnnxParser/IOnnxParser.hpp"
using namespace armnn;
struct IOInfos
{
std::vector<BindingPointInfo> inputInfos;
std::vector<BindingPointInfo> outputInfos;
};
// from https://rigtorp.se/spinlock/
struct SpinLock
{
std::atomic<bool> lock_ = {false};
void lock()
{
for (;;)
{
if (!lock_.exchange(true, std::memory_order_acquire))
{
break;
}
while (lock_.load(std::memory_order_relaxed))
;
}
}
void unlock() { lock_.store(false, std::memory_order_release); }
};
class Ann
{
public:
int load(const char *modelPath,
bool fastMath,
bool fp16,
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
INetworkPtr network = loadModel(modelPath);
IOptimizedNetworkPtr optNet = OptimizeNetwork(network.get(), fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
const IOInfos infos = getIOInfos(optNet.get());
NetworkId netId;
mutex.lock();
Status status = runtime->LoadNetwork(netId, std::move(optNet));
mutex.unlock();
if (status != Status::Success)
{
return -1;
}
spinLock.lock();
ioInfos[netId] = infos;
mutexes.emplace(netId, std::make_unique<std::mutex>());
spinLock.unlock();
return netId;
}
void execute(NetworkId netId, const void **inputData, void **outputData)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
auto m = mutexes[netId].get();
spinLock.unlock();
InputTensors inputTensors;
inputTensors.reserve(infos->inputInfos.size());
size_t i = 0;
for (const BindingPointInfo &info : infos->inputInfos)
inputTensors.emplace_back(info.first, ConstTensor(info.second, inputData[i++]));
OutputTensors outputTensors;
outputTensors.reserve(infos->outputInfos.size());
i = 0;
for (const BindingPointInfo &info : infos->outputInfos)
outputTensors.emplace_back(info.first, Tensor(info.second, outputData[i++]));
m->lock();
runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
m->unlock();
}
void unload(NetworkId netId)
{
mutex.lock();
runtime->UnloadNetwork(netId);
mutex.unlock();
}
int tensors(NetworkId netId, bool isInput = false)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
spinLock.unlock();
return (int)(isInput ? infos->inputInfos.size() : infos->outputInfos.size());
}
unsigned long shape(NetworkId netId, bool isInput = false, int index = 0)
{
spinLock.lock();
const IOInfos *infos = &ioInfos[netId];
spinLock.unlock();
const TensorShape shape = (isInput ? infos->inputInfos : infos->outputInfos)[index].second.GetShape();
unsigned long s = 0;
for (unsigned int d = 0; d < shape.GetNumDimensions(); d++)
s |= ((unsigned long)shape[d]) << (d * 16); // stores up to 4 16-bit values in a 64-bit value
return s;
}
Ann(int tuningLevel, const char *tuningFile)
{
IRuntime::CreationOptions runtimeOptions;
BackendOptions backendOptions{"GpuAcc",
{
{"TuningLevel", tuningLevel},
{"MemoryOptimizerStrategy", "ConstantMemoryStrategy"}, // SingleAxisPriorityList or ConstantMemoryStrategy
}};
if (tuningFile)
backendOptions.AddOption({"TuningFile", tuningFile});
runtimeOptions.m_BackendOptions.emplace_back(backendOptions);
runtime = IRuntime::CreateRaw(runtimeOptions);
};
~Ann()
{
IRuntime::Destroy(runtime);
};
private:
INetworkPtr loadModel(const char *modelPath)
{
const auto path = std::string(modelPath);
if (path.rfind(".tflite") == path.length() - 7) // endsWith()
{
auto parser = armnnTfLiteParser::ITfLiteParser::CreateRaw();
return parser->CreateNetworkFromBinaryFile(modelPath);
}
else if (path.rfind(".onnx") == path.length() - 5) // endsWith()
{
auto parser = armnnOnnxParser::IOnnxParser::CreateRaw();
return parser->CreateNetworkFromBinaryFile(modelPath);
}
else
{
std::ifstream ifs(path, std::ifstream::in | std::ifstream::binary);
auto parser = armnnDeserializer::IDeserializer::CreateRaw();
return parser->CreateNetworkFromBinary(ifs);
}
}
static BindingPointInfo getInputTensorInfo(LayerBindingId inputBindingId, TensorInfo info)
{
const auto newInfo = TensorInfo{info.GetShape(), info.GetDataType(),
info.GetQuantizationScale(),
info.GetQuantizationOffset(),
true};
return {inputBindingId, newInfo};
}
IOptimizedNetworkPtr OptimizeNetwork(INetwork *network, bool fastMath, bool fp16, bool saveCachedNetwork, const char *cachedNetworkPath)
{
const bool allowExpandedDims = false;
const ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly;
OptimizerOptionsOpaque options;
options.SetReduceFp32ToFp16(fp16);
options.SetShapeInferenceMethod(shapeInferenceMethod);
options.SetAllowExpandedDims(allowExpandedDims);
BackendOptions gpuAcc("GpuAcc", {{"FastMathEnabled", fastMath}});
if (cachedNetworkPath)
{
gpuAcc.AddOption({"SaveCachedNetwork", saveCachedNetwork});
gpuAcc.AddOption({"CachedNetworkFilePath", cachedNetworkPath});
}
options.AddModelOption(gpuAcc);
// No point in using ARMNN for CPU, use ONNX (quantized) instead.
// BackendOptions cpuAcc("CpuAcc",
// {
// {"FastMathEnabled", fastMath},
// {"NumberOfThreads", 0},
// });
// options.AddModelOption(cpuAcc);
BackendOptions allowExDimOpt("AllowExpandedDims",
{{"AllowExpandedDims", allowExpandedDims}});
options.AddModelOption(allowExDimOpt);
BackendOptions shapeInferOpt("ShapeInferenceMethod",
{{"InferAndValidate", shapeInferenceMethod == ShapeInferenceMethod::InferAndValidate}});
options.AddModelOption(shapeInferOpt);
std::vector<BackendId> backends = {
BackendId("GpuAcc"),
// BackendId("CpuAcc"),
// BackendId("CpuRef"),
};
return Optimize(*network, backends, runtime->GetDeviceSpec(), options);
}
IOInfos getIOInfos(IOptimizedNetwork *optNet)
{
struct InfoStrategy : IStrategy
{
void ExecuteStrategy(const IConnectableLayer *layer,
const BaseDescriptor &descriptor,
const std::vector<ConstTensor> &constants,
const char *name,
const LayerBindingId id = 0) override
{
IgnoreUnused(descriptor, constants, id);
const LayerType lt = layer->GetType();
if (lt == LayerType::Input)
ioInfos.inputInfos.push_back(getInputTensorInfo(id, layer->GetOutputSlot(0).GetTensorInfo()));
else if (lt == LayerType::Output)
ioInfos.outputInfos.push_back({id, layer->GetInputSlot(0).GetTensorInfo()});
}
IOInfos ioInfos;
};
InfoStrategy infoStrategy;
optNet->ExecuteStrategy(infoStrategy);
return infoStrategy.ioInfos;
}
IRuntime *runtime;
std::map<NetworkId, IOInfos> ioInfos;
std::map<NetworkId, std::unique_ptr<std::mutex>> mutexes; // mutex per network so the same network is not executed concurrently
std::mutex mutex; // global mutex for load/unload calls to the runtime
SpinLock spinLock; // fast spin lock to guard access to the ioInfos and mutexes maps
};
extern "C" void *init(int logLevel, int tuningLevel, const char *tuningFile)
{
LogSeverity level = static_cast<LogSeverity>(logLevel);
ConfigureLogging(true, true, level);
Ann *ann = new Ann(tuningLevel, tuningFile);
return ann;
}
extern "C" void destroy(void *ann)
{
delete ((Ann *)ann);
}
extern "C" int load(void *ann,
const char *path,
bool fastMath,
bool fp16,
bool saveCachedNetwork,
const char *cachedNetworkPath)
{
return ((Ann *)ann)->load(path, fastMath, fp16, saveCachedNetwork, cachedNetworkPath);
}
extern "C" void unload(void *ann, NetworkId netId)
{
((Ann *)ann)->unload(netId);
}
extern "C" void execute(void *ann, NetworkId netId, const void **inputData, void **outputData)
{
((Ann *)ann)->execute(netId, inputData, outputData);
}
extern "C" unsigned long shape(void *ann, NetworkId netId, bool isInput, int index)
{
return ((Ann *)ann)->shape(netId, isInput, index);
}
extern "C" int tensors(void *ann, NetworkId netId, bool isInput)
{
return ((Ann *)ann)->tensors(netId, isInput);
}
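For reference, the 16-bit packing that shape() returns above can be sketched in Python; this mirrors the C++ bit layout and is illustration only (the example dims are made up):

def pack_shape(dims: tuple[int, ...]) -> int:
    # each dimension occupies 16 bits, first dimension in the lowest bits
    packed = 0
    for d, dim in enumerate(dims):
        assert 0 < dim < (1 << 16), "each dimension must fit in 16 bits"
        packed |= dim << (d * 16)
    return packed

def unpack_shape(packed: int) -> tuple[int, ...]:
    # inverse of pack_shape; matches the decoding in Ann.shape() further down
    dims = []
    while packed:
        dims.append(packed & 0xFFFF)
        packed >>= 16
    return tuple(dims)

assert unpack_shape(pack_shape((1, 3, 224, 224))) == (1, 3, 224, 224)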

View File

@@ -1,162 +0,0 @@
from __future__ import annotations
from ctypes import CDLL, Array, c_bool, c_char_p, c_int, c_ulong, c_void_p
from os.path import exists
from typing import Any, Protocol, TypeVar
import numpy as np
from numpy.typing import NDArray
from app.config import log
try:
CDLL("libmali.so") # fail if libmali.so is not mounted into container
libann = CDLL("libann.so")
libann.init.argtypes = c_int, c_int, c_char_p
libann.init.restype = c_void_p
libann.load.argtypes = c_void_p, c_char_p, c_bool, c_bool, c_bool, c_char_p
libann.load.restype = c_int
libann.execute.argtypes = c_void_p, c_int, Array[c_void_p], Array[c_void_p]
libann.unload.argtypes = c_void_p, c_int
libann.destroy.argtypes = (c_void_p,)
libann.shape.argtypes = c_void_p, c_int, c_bool, c_int
libann.shape.restype = c_ulong
libann.tensors.argtypes = c_void_p, c_int, c_bool
libann.tensors.restype = c_int
is_available = True
except OSError as e:
log.debug("Could not load ANN shared libraries, using ONNX: %s", e)
is_available = False
T = TypeVar("T", covariant=True)
class Newable(Protocol[T]):
def new(self) -> None:
...
class _Singleton(type, Newable[T]):
_instances: dict[_Singleton[T], Newable[T]] = {}
def __call__(cls, *args: Any, **kwargs: Any) -> Newable[T]:
if cls not in cls._instances:
obj: Newable[T] = super(_Singleton, cls).__call__(*args, **kwargs)
cls._instances[cls] = obj
else:
obj = cls._instances[cls]
obj.new()
return obj
class Ann(metaclass=_Singleton):
def __init__(self, log_level: int = 3, tuning_level: int = 1, tuning_file: str | None = None) -> None:
if not is_available:
raise RuntimeError("libann is not available!")
if tuning_file and not exists(tuning_file):
raise ValueError("tuning_file must point to an existing (possibly empty) file!")
if tuning_level == 0 and tuning_file is None:
raise ValueError("tuning_level == 0 reads existing tuning information and requires a tuning_file")
if tuning_level < 0 or tuning_level > 3:
raise ValueError("tuning_level must be 0 (load from tuning_file), 1, 2 or 3.")
if log_level < 0 or log_level > 5:
raise ValueError("log_level must be 0 (trace), 1 (debug), 2 (info), 3 (warning), 4 (error) or 5 (fatal)")
self.log_level = log_level
self.tuning_level = tuning_level
self.tuning_file = tuning_file
self.output_shapes: dict[int, tuple[tuple[int, ...], ...]] = {}
self.input_shapes: dict[int, tuple[tuple[int, ...], ...]] = {}
self.ann: int | None = None
self.new()
def new(self) -> None:
if self.ann is None:
self.ann = libann.init(
self.log_level,
self.tuning_level,
self.tuning_file.encode() if self.tuning_file is not None else None,
)
self.ref_count = 0
self.ref_count += 1
def destroy(self) -> None:
self.ref_count -= 1
if self.ref_count <= 0 and self.ann is not None:
libann.destroy(self.ann)
self.ann = None
def __del__(self) -> None:
if self.ann is not None:
libann.destroy(self.ann)
self.ann = None
def load(
self,
model_path: str,
fast_math: bool = True,
fp16: bool = False,
save_cached_network: bool = False,
cached_network_path: str | None = None,
) -> int:
if not model_path.endswith((".armnn", ".tflite", ".onnx")):
raise ValueError("model_path must be a file with extension .armnn, .tflite or .onnx")
if not exists(model_path):
raise ValueError("model_path must point to an existing file!")
if cached_network_path is not None and not exists(cached_network_path):
raise ValueError("cached_network_path must point to an existing (possibly empty) file!")
if save_cached_network and cached_network_path is None:
raise ValueError("save_cached_network is True, cached_network_path must be specified!")
net_id: int = libann.load(
self.ann,
model_path.encode(),
fast_math,
fp16,
save_cached_network,
cached_network_path.encode() if cached_network_path is not None else None,
)
self.input_shapes[net_id] = tuple(
self.shape(net_id, input=True, index=i) for i in range(self.tensors(net_id, input=True))
)
self.output_shapes[net_id] = tuple(
self.shape(net_id, input=False, index=i) for i in range(self.tensors(net_id, input=False))
)
return net_id
def unload(self, network_id: int) -> None:
    libann.unload(self.ann, network_id)
    del self.output_shapes[network_id]
    del self.input_shapes[network_id]
def execute(self, network_id: int, input_tensors: list[NDArray[np.float32]]) -> list[NDArray[np.float32]]:
if not isinstance(input_tensors, list):
raise ValueError("input_tensors needs to be a list!")
net_input_shapes = self.input_shapes[network_id]
if len(input_tensors) != len(net_input_shapes):
raise ValueError(f"input_tensors lengths {len(input_tensors)} != network inputs {len(net_input_shapes)}")
for net_input_shape, input_tensor in zip(net_input_shapes, input_tensors):
if net_input_shape != input_tensor.shape:
raise ValueError(f"input_tensor shape {input_tensor.shape} != network input shape {net_input_shape}")
if not input_tensor.flags.c_contiguous:
raise ValueError("input_tensors must be c_contiguous numpy ndarrays")
output_tensors: list[NDArray[np.float32]] = [
np.ndarray(s, dtype=np.float32) for s in self.output_shapes[network_id]
]
input_type = c_void_p * len(input_tensors)
inputs = input_type(*[t.ctypes.data_as(c_void_p) for t in input_tensors])
output_type = c_void_p * len(output_tensors)
outputs = output_type(*[t.ctypes.data_as(c_void_p) for t in output_tensors])
libann.execute(self.ann, network_id, inputs, outputs)
return output_tensors
def shape(self, network_id: int, input: bool = False, index: int = 0) -> tuple[int, ...]:
s = libann.shape(self.ann, network_id, input, index)
a = []
while s != 0:
a.append(s & 0xFFFF)
s >>= 16
return tuple(a)
def tensors(self, network_id: int, input: bool = False) -> int:
tensors: int = libann.tensors(self.ann, network_id, input)
return tensors
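Taken together, a minimal usage sketch of this wrapper (assuming libmali.so and libann.so are loadable, and ./model.armnn is a hypothetical converted model):

import numpy as np
from ann.ann import Ann

ann = Ann(tuning_level=1)
net_id = ann.load("./model.armnn")  # hypothetical model path
# feed c-contiguous float32 tensors matching the network's input shapes
inputs = [np.random.rand(*shape).astype(np.float32) for shape in ann.input_shapes[net_id]]
outputs = ann.execute(net_id, inputs)
print([o.shape for o in outputs])
ann.unload(net_id)
ann.destroy()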

View File

@@ -1 +0,0 @@
g++ -shared -O3 -o libann.so -fuse-ld=gold -std=c++17 -I$ARMNN_PATH/include -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -L$ARMNN_PATH ann.cpp

View File

@@ -1,2 +0,0 @@
armnn*
output/

View File

@@ -1,4 +0,0 @@
#!/bin/sh
cd armnn-23.11/
g++ -o ../armnnconverter -O1 -DARMNN_ONNX_PARSER -DARMNN_SERIALIZER -DARMNN_TF_LITE_PARSER -fuse-ld=gold -std=c++17 -Iinclude -Isrc/armnnUtils -Ithird-party -larmnn -larmnnDeserializer -larmnnTfLiteParser -larmnnOnnxParser -larmnnSerializer -L../armnn src/armnnConverter/ArmnnConverter.cpp

View File

@@ -1,8 +0,0 @@
#!/bin/sh
# binaries
mkdir armnn
curl -SL "https://github.com/ARM-software/armnn/releases/download/v23.11/ArmNN-linux-x86_64.tar.gz" | tar -zx -C armnn
# source to build ArmnnConverter
curl -SL "https://github.com/ARM-software/armnn/archive/refs/tags/v23.11.tar.gz" | tar -zx

View File

@@ -1,201 +0,0 @@
name: annexport
channels:
- pytorch
- nvidia
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- aiohttp=3.9.1=py310h2372a71_0
- aiosignal=1.3.1=pyhd8ed1ab_0
- arpack=3.8.0=nompi_h0baa96a_101
- async-timeout=4.0.3=pyhd8ed1ab_0
- attrs=23.1.0=pyh71513ae_1
- aws-c-auth=0.7.3=h28f7589_1
- aws-c-cal=0.6.1=hc309b26_1
- aws-c-common=0.9.0=hd590300_0
- aws-c-compression=0.2.17=h4d4d85c_2
- aws-c-event-stream=0.3.1=h2e3709c_4
- aws-c-http=0.7.11=h00aa349_4
- aws-c-io=0.13.32=he9a53bd_1
- aws-c-mqtt=0.9.3=hb447be9_1
- aws-c-s3=0.3.14=hf3aad02_1
- aws-c-sdkutils=0.1.12=h4d4d85c_1
- aws-checksums=0.1.17=h4d4d85c_1
- aws-crt-cpp=0.21.0=hb942446_5
- aws-sdk-cpp=1.10.57=h85b1a90_19
- blas=2.120=openblas
- blas-devel=3.9.0=20_linux64_openblas
- brotli-python=1.0.9=py310hd8f1fbe_9
- bzip2=1.0.8=hd590300_5
- c-ares=1.23.0=hd590300_0
- ca-certificates=2023.11.17=hbcca054_0
- certifi=2023.11.17=pyhd8ed1ab_0
- charset-normalizer=3.3.2=pyhd8ed1ab_0
- click=8.1.7=unix_pyh707e725_0
- colorama=0.4.6=pyhd8ed1ab_0
- coloredlogs=15.0.1=pyhd8ed1ab_3
- cuda-cudart=11.7.99=0
- cuda-cupti=11.7.101=0
- cuda-libraries=11.7.1=0
- cuda-nvrtc=11.7.99=0
- cuda-nvtx=11.7.91=0
- cuda-runtime=11.7.1=0
- dataclasses=0.8=pyhc8e2a94_3
- datasets=2.14.7=pyhd8ed1ab_0
- dill=0.3.7=pyhd8ed1ab_0
- filelock=3.13.1=pyhd8ed1ab_0
- flatbuffers=23.5.26=h59595ed_1
- freetype=2.12.1=h267a509_2
- frozenlist=1.4.0=py310h2372a71_1
- fsspec=2023.10.0=pyhca7485f_0
- ftfy=6.1.3=pyhd8ed1ab_0
- gflags=2.2.2=he1b5a44_1004
- glog=0.6.0=h6f12383_0
- glpk=5.0=h445213a_0
- gmp=6.3.0=h59595ed_0
- gmpy2=2.1.2=py310h3ec546c_1
- huggingface_hub=0.17.3=pyhd8ed1ab_0
- humanfriendly=10.0=pyhd8ed1ab_6
- icu=73.2=h59595ed_0
- idna=3.6=pyhd8ed1ab_0
- importlib-metadata=7.0.0=pyha770c72_0
- importlib_metadata=7.0.0=hd8ed1ab_0
- joblib=1.3.2=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- krb5=1.21.2=h659d440_0
- lcms2=2.15=h7f713cb_2
- ld_impl_linux-64=2.40=h41732ed_0
- lerc=4.0.0=h27087fc_0
- libabseil=20230125.3=cxx17_h59595ed_0
- libarrow=12.0.1=hb87d912_8_cpu
- libblas=3.9.0=20_linux64_openblas
- libbrotlicommon=1.0.9=h166bdaf_9
- libbrotlidec=1.0.9=h166bdaf_9
- libbrotlienc=1.0.9=h166bdaf_9
- libcblas=3.9.0=20_linux64_openblas
- libcrc32c=1.1.2=h9c3ff4c_0
- libcublas=11.10.3.66=0
- libcufft=10.7.2.124=h4fbf590_0
- libcufile=1.8.1.2=0
- libcurand=10.3.4.101=0
- libcurl=8.5.0=hca28451_0
- libcusolver=11.4.0.1=0
- libcusparse=11.7.4.91=0
- libdeflate=1.19=hd590300_0
- libedit=3.1.20191231=he28a2e2_2
- libev=4.33=hd590300_2
- libevent=2.1.12=hf998b51_1
- libffi=3.4.2=h7f98852_5
- libgcc-ng=13.2.0=h807b86a_3
- libgfortran-ng=13.2.0=h69a702a_3
- libgfortran5=13.2.0=ha4646dd_3
- libgoogle-cloud=2.12.0=hac9eb74_1
- libgrpc=1.54.3=hb20ce57_0
- libhwloc=2.9.3=default_h554bfaf_1009
- libiconv=1.17=hd590300_1
- libjpeg-turbo=2.1.5.1=hd590300_1
- liblapack=3.9.0=20_linux64_openblas
- liblapacke=3.9.0=20_linux64_openblas
- libnghttp2=1.58.0=h47da74e_1
- libnpp=11.7.4.75=0
- libnsl=2.0.1=hd590300_0
- libnuma=2.0.16=h0b41bf4_1
- libnvjpeg=11.8.0.2=0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libpng=1.6.39=h753d276_0
- libprotobuf=3.21.12=hfc55251_2
- libsentencepiece=0.1.99=h180e1df_0
- libsqlite=3.44.2=h2797004_0
- libssh2=1.11.0=h0841786_0
- libstdcxx-ng=13.2.0=h7e041cc_3
- libthrift=0.18.1=h8fd135c_2
- libtiff=4.6.0=h29866fb_1
- libutf8proc=2.8.0=h166bdaf_0
- libuuid=2.38.1=h0b41bf4_0
- libwebp-base=1.3.2=hd590300_0
- libxcb=1.15=h0b41bf4_0
- libxml2=2.11.6=h232c23b_0
- libzlib=1.2.13=hd590300_5
- llvm-openmp=17.0.6=h4dfa4b3_0
- lz4-c=1.9.4=hcb278e6_0
- mkl=2022.2.1=h84fe81f_16997
- mkl-devel=2022.2.1=ha770c72_16998
- mkl-include=2022.2.1=h84fe81f_16997
- mpc=1.3.1=hfe3b2da_0
- mpfr=4.2.1=h9458935_0
- mpmath=1.3.0=pyhd8ed1ab_0
- multidict=6.0.4=py310h2372a71_1
- multiprocess=0.70.15=py310h2372a71_1
- ncurses=6.4=h59595ed_2
- numpy=1.26.2=py310hb13e2d6_0
- onnx=1.14.0=py310ha3deec4_1
- onnx2torch=1.5.13=pyhd8ed1ab_0
- onnxruntime=1.16.3=py310hd4b7fbc_1_cpu
- open-clip-torch=2.23.0=pyhd8ed1ab_1
- openblas=0.3.25=pthreads_h7a3da1a_0
- openjpeg=2.5.0=h488ebb8_3
- openssl=3.2.0=hd590300_1
- orc=1.9.0=h2f23424_1
- packaging=23.2=pyhd8ed1ab_0
- pandas=2.1.4=py310hcc13569_0
- pillow=10.0.1=py310h29da1c1_1
- pip=23.3.1=pyhd8ed1ab_0
- protobuf=4.21.12=py310heca2aa9_0
- pthread-stubs=0.4=h36c2ea0_1001
- pyarrow=12.0.1=py310h0576679_8_cpu
- pyarrow-hotfix=0.6=pyhd8ed1ab_0
- pysocks=1.7.1=pyha2e5f31_6
- python=3.10.13=hd12c33a_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python-flatbuffers=23.5.26=pyhd8ed1ab_0
- python-tzdata=2023.3=pyhd8ed1ab_0
- python-xxhash=3.4.1=py310h2372a71_0
- python_abi=3.10=4_cp310
- pytorch=1.13.1=cpu_py310hd11e9c7_1
- pytorch-cuda=11.7=h778d358_5
- pytorch-mutex=1.0=cuda
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py310h2372a71_1
- rdma-core=28.9=h59595ed_1
- re2=2023.03.02=h8c504da_0
- readline=8.2=h8228510_1
- regex=2023.10.3=py310h2372a71_0
- requests=2.31.0=pyhd8ed1ab_0
- s2n=1.3.49=h06160fa_0
- sacremoses=0.0.53=pyhd8ed1ab_0
- safetensors=0.3.3=py310hcb5633a_1
- sentencepiece=0.1.99=hff52083_0
- sentencepiece-python=0.1.99=py310hebdb9f0_0
- sentencepiece-spm=0.1.99=h180e1df_0
- setuptools=68.2.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- sleef=3.5.1=h9b69904_2
- snappy=1.1.10=h9fff704_0
- sympy=1.12=pypyh9d50eac_103
- tbb=2021.11.0=h00ab1b0_0
- texttable=1.7.0=pyhd8ed1ab_0
- timm=0.9.12=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- tokenizers=0.14.1=py310h320607d_2
- torchvision=0.14.1=cpu_py310hd3d2ac3_1
- tqdm=4.66.1=pyhd8ed1ab_0
- transformers=4.35.2=pyhd8ed1ab_0
- typing-extensions=4.9.0=hd8ed1ab_0
- typing_extensions=4.9.0=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- ucx=1.14.1=h64cca9d_5
- urllib3=2.1.0=pyhd8ed1ab_0
- wcwidth=0.2.12=pyhd8ed1ab_0
- wheel=0.42.0=pyhd8ed1ab_0
- xorg-libxau=1.0.11=hd590300_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xxhash=0.8.2=hd590300_0
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- yarl=1.9.3=py310h2372a71_0
- zipp=3.17.0=pyhd8ed1ab_0
- zlib=1.2.13=hd590300_5
- zstd=1.5.5=hfc55251_0
- pip:
- git+https://github.com/fyfrey/TinyNeuralNetwork.git

View File

@@ -1,157 +0,0 @@
import logging
import os
import platform
import subprocess
from abc import abstractmethod
import onnx
import open_clip
import torch
from onnx2torch import convert
from onnxruntime.tools.onnx_model_utils import fix_output_shapes, make_input_shape_fixed
from tinynn.converter import TFLiteConverter
class ExportBase(torch.nn.Module):
input_shape: tuple[int, ...]
def __init__(self, device: torch.device, name: str):
super().__init__()
self.device = device
self.name = name
self.optimize = 5
self.nchw_transpose = False
@abstractmethod
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, ...]:
pass
def dummy_input(self) -> torch.FloatTensor:
return torch.rand((1, 3, 224, 224), device=self.device)
class ArcFace(ExportBase):
input_shape = (1, 3, 112, 112)
def __init__(self, onnx_model_path: str, device: torch.device):
name, _ = os.path.splitext(os.path.basename(onnx_model_path))
super().__init__(device, name)
onnx_model = onnx.load_model(onnx_model_path)
make_input_shape_fixed(onnx_model.graph, onnx_model.graph.input[0].name, self.input_shape)
fix_output_shapes(onnx_model)
self.model = convert(onnx_model).to(device)
if self.device.type == "cuda":
self.model = self.model.half()
def forward(self, input_tensor: torch.Tensor) -> torch.FloatTensor:
embedding: torch.FloatTensor = self.model(
input_tensor.half() if self.device.type == "cuda" else input_tensor
).float()
assert isinstance(embedding, torch.FloatTensor)
return embedding
def dummy_input(self) -> torch.FloatTensor:
return torch.rand(self.input_shape, device=self.device)
class RetinaFace(ExportBase):
input_shape = (1, 3, 640, 640)
def __init__(self, onnx_model_path: str, device: torch.device):
name, _ = os.path.splitext(os.path.basename(onnx_model_path))
super().__init__(device, name)
self.optimize = 3
self.model = convert(onnx_model_path).eval().to(device)
if self.device.type == "cuda":
self.model = self.model.half()
def forward(self, input_tensor: torch.Tensor) -> tuple[torch.FloatTensor, ...]:
out: torch.Tensor = self.model(input_tensor.half() if self.device.type == "cuda" else input_tensor)
return tuple(o.float() for o in out)
def dummy_input(self) -> torch.FloatTensor:
return torch.rand(self.input_shape, device=self.device)
class ClipVision(ExportBase):
input_shape = (1, 3, 224, 224)
def __init__(self, model_name: str, weights: str, device: torch.device):
super().__init__(device, model_name + "__" + weights)
self.model = open_clip.create_model(
model_name,
weights,
precision="fp16" if device.type == "cuda" else "fp32",
jit=False,
require_pretrained=True,
device=device,
)
def forward(self, input_tensor: torch.Tensor) -> torch.FloatTensor:
embedding: torch.Tensor = self.model.encode_image(
input_tensor.half() if self.device.type == "cuda" else input_tensor,
normalize=True,
).float()
return embedding
def export(model: ExportBase) -> None:
model.eval()
for param in model.parameters():
param.requires_grad = False
dummy_input = model.dummy_input()
model(dummy_input)
jit = torch.jit.trace(model, dummy_input) # type: ignore[no-untyped-call,attr-defined]
tflite_model_path = f"output/{model.name}.tflite"
os.makedirs("output", exist_ok=True)
converter = TFLiteConverter(
jit,
dummy_input,
tflite_model_path,
optimize=model.optimize,
nchw_transpose=model.nchw_transpose,
)
# segfaults on ARM, must run on x86_64 / AMD64
converter.convert()
armnn_model_path = f"output/{model.name}.armnn"
os.environ["LD_LIBRARY_PATH"] = "armnn"
subprocess.run(
[
"./armnnconverter",
"-f",
"tflite-binary",
"-m",
tflite_model_path,
"-i",
"input_tensor",
"-o",
"output_tensor",
"-p",
armnn_model_path,
]
)
def main() -> None:
if platform.machine() not in ("x86_64", "AMD64"):
raise RuntimeError(f"Can only run on x86_64 / AMD64, not {platform.machine()}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
logging.warning(
"No CUDA available, cannot create fp16 model! proceeding to create a fp32 model (use only for testing)"
)
models = [
ClipVision("ViT-B-32", "openai", device),
ArcFace("buffalo_l_rec.onnx", device),
RetinaFace("buffalo_l_det.onnx", device),
]
for model in models:
export(model)
if __name__ == "__main__":
with torch.no_grad():
main()

View File

@@ -26,7 +26,6 @@ class Settings(BaseSettings):
request_threads: int = os.cpu_count() or 4
model_inter_op_threads: int = 1
model_intra_op_threads: int = 2
ann: bool = True
class Config:
env_prefix = "MACHINE_LEARNING_"

View File

@@ -5,10 +5,10 @@ from unittest import mock
import numpy as np
import pytest
from fastapi.testclient import TestClient
from numpy.typing import NDArray
from PIL import Image
from .main import app
from .schemas import ndarray_f32
@pytest.fixture
@@ -17,7 +17,7 @@ def pil_image() -> Image.Image:
@pytest.fixture
def cv_image(pil_image: Image.Image) -> NDArray[np.float32]:
def cv_image(pil_image: Image.Image) -> ndarray_f32:
return np.asarray(pil_image)[:, :, ::-1] # PIL uses RGB while cv2 uses BGR

View File

@@ -2,11 +2,11 @@ import asyncio
import gc
import os
import signal
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Callable, Iterator
from typing import Any, Iterator
from zipfile import BadZipFile
import orjson
@@ -26,6 +26,7 @@ from .schemas import (
)
MultiPartParser.max_file_size = 2**26 # spools to disk if payload is 64 MiB or larger
app = FastAPI()
model_cache = ModelCache(ttl=settings.model_ttl, revalidate=settings.model_ttl > 0)
thread_pool: ThreadPoolExecutor | None = None
@@ -34,8 +35,8 @@ active_requests = 0
last_called: float | None = None
@asynccontextmanager
async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:
@app.on_event("startup")
def startup() -> None:
global thread_pool
log.info(
(
@@ -43,22 +44,21 @@ async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:
f"{f'after {settings.model_ttl}s of inactivity' if settings.model_ttl > 0 else 'disabled'}."
)
)
# asyncio is a huge bottleneck for performance, so we use a thread pool to run blocking code
thread_pool = ThreadPoolExecutor(settings.request_threads) if settings.request_threads > 0 else None
if settings.model_ttl > 0 and settings.model_ttl_poll_s > 0:
asyncio.ensure_future(idle_shutdown_task())
log.info(f"Initialized request thread pool with {settings.request_threads} threads.")
try:
if settings.request_threads > 0:
# asyncio is a huge bottleneck for performance, so we use a thread pool to run blocking code
thread_pool = ThreadPoolExecutor(settings.request_threads) if settings.request_threads > 0 else None
log.info(f"Initialized request thread pool with {settings.request_threads} threads.")
if settings.model_ttl > 0 and settings.model_ttl_poll_s > 0:
asyncio.ensure_future(idle_shutdown_task())
yield
finally:
log.handlers.clear()
for model in model_cache.cache._cache.values():
del model
if thread_pool is not None:
thread_pool.shutdown()
gc.collect()
@app.on_event("shutdown")
def shutdown() -> None:
log.handlers.clear()
for model in model_cache.cache._cache.values():
del model
if thread_pool is not None:
thread_pool.shutdown()
gc.collect()
def update_state() -> Iterator[None]:
@@ -71,9 +71,6 @@ def update_state() -> Iterator[None]:
active_requests -= 1
app = FastAPI(lifespan=lifespan)
@app.get("/", response_model=MessageResponse)
async def root() -> dict[str, str]:
return {"message": "Immich ML"}
@@ -105,14 +102,14 @@ async def predict(
model = await load(await model_cache.get(model_name, model_type, **kwargs))
model.configure(**kwargs)
outputs = await run(model.predict, inputs)
outputs = await run(model, inputs)
return ORJSONResponse(outputs)
async def run(func: Callable[..., Any], inputs: Any) -> Any:
async def run(model: InferenceModel, inputs: Any) -> Any:
if thread_pool is None:
return func(inputs)
return await asyncio.get_running_loop().run_in_executor(thread_pool, func, inputs)
return model.predict(inputs)
return await asyncio.get_running_loop().run_in_executor(thread_pool, model.predict, inputs)
async def load(model: InferenceModel) -> InferenceModel:
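The run() signature changes here, but the offloading pattern is the same: blocking inference is pushed onto the thread pool so the event loop is never blocked. A standalone sketch of that pattern, with a stand-in predict function:

import asyncio
from concurrent.futures import ThreadPoolExecutor

thread_pool = ThreadPoolExecutor(max_workers=4)

def predict(payload: bytes) -> dict[str, int]:
    # stand-in for a blocking model call
    return {"size": len(payload)}

async def handle(payload: bytes) -> dict[str, int]:
    # run the blocking call on the pool; the event loop stays free
    return await asyncio.get_running_loop().run_in_executor(thread_pool, predict, payload)

print(asyncio.run(handle(b"example")))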

View File

@@ -1,68 +0,0 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, NamedTuple
import numpy as np
from numpy.typing import NDArray
from ann.ann import Ann
from ..config import log, settings
class AnnSession:
"""
Wrapper for ANN to be a drop-in replacement for an ONNX session.
"""
def __init__(self, model_path: Path):
tuning_file = Path(settings.cache_folder) / "gpu-tuning.ann"
with tuning_file.open(mode="a"):
# make sure tuning file exists (without clearing contents)
# once filled, the tuning file reduces the cost/time of the first
# inference after model load by tens of seconds
pass
self.ann = Ann(tuning_level=3, tuning_file=tuning_file.as_posix())
log.info("Loading ANN model %s ...", model_path)
cache_file = model_path.with_suffix(".anncache")
save = False
if not cache_file.is_file():
save = True
with cache_file.open(mode="a"):
# create empty model cache file
pass
self.model = self.ann.load(
model_path.as_posix(),
save_cached_network=save,
cached_network_path=cache_file.as_posix(),
)
log.info("Loaded ANN model with ID %d", self.model)
def __del__(self) -> None:
self.ann.unload(self.model)
log.info("Unloaded ANN model %d", self.model)
self.ann.destroy()
def get_inputs(self) -> list[AnnNode]:
shapes = self.ann.input_shapes[self.model]
return [AnnNode(None, s) for s in shapes]
def get_outputs(self) -> list[AnnNode]:
shapes = self.ann.output_shapes[self.model]
return [AnnNode(None, s) for s in shapes]
def run(
self,
output_names: list[str] | None,
input_feed: dict[str, NDArray[np.float32]] | dict[str, NDArray[np.int32]],
run_options: Any = None,
) -> list[NDArray[np.float32]]:
inputs: list[NDArray[np.float32]] = [np.ascontiguousarray(v) for v in input_feed.values()]
return self.ann.execute(self.model, inputs)
class AnnNode(NamedTuple):
name: str | None
shape: tuple[int, ...]
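Since AnnSession mimics the ONNX Runtime session interface (get_inputs/get_outputs/run), callers can stay session-agnostic. A sketch, with hypothetical model path and input name, assuming a single input and output:

import numpy as np
from pathlib import Path
from app.models.ann import AnnSession  # import path assumes this repo's app/models layout

session = AnnSession(Path("/cache/model.armnn"))  # hypothetical path
(node,) = session.get_inputs()  # assumes a single input
x = np.random.rand(*node.shape).astype(np.float32)
(embedding,) = session.run(None, {"input": x})  # key name is ignored by AnnSession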

View File

@@ -10,11 +10,8 @@ import onnxruntime as ort
from huggingface_hub import snapshot_download
from typing_extensions import Buffer
import ann.ann
from ..config import get_cache_dir, get_hf_model_name, log, settings
from ..schemas import ModelType
from .ann import AnnSession
class InferenceModel(ABC):
@@ -141,21 +138,6 @@ class InferenceModel(ABC):
self.cache_dir.unlink()
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _make_session(self, model_path: Path) -> AnnSession | ort.InferenceSession:
armnn_path = model_path.with_suffix(".armnn")
if settings.ann and ann.ann.is_available and armnn_path.is_file():
session = AnnSession(armnn_path)
elif model_path.is_file():
session = ort.InferenceSession(
model_path.as_posix(),
sess_options=self.sess_options,
providers=self.providers,
provider_options=self.provider_options,
)
else:
raise ValueError(f"the file model_path='{model_path}' does not exist")
return session
# HF deep copies configs, so we need to make session options picklable
class PicklableSessionOptions(ort.SessionOptions): # type: ignore[misc]

View File

@@ -6,13 +6,13 @@ from pathlib import Path
from typing import Any, Literal
import numpy as np
from numpy.typing import NDArray
import onnxruntime as ort
from PIL import Image
from tokenizers import Encoding, Tokenizer
from app.config import clean_name, log
from app.models.transforms import crop, get_pil_resampling, normalize, resize, to_numpy
from app.schemas import ModelType
from app.schemas import ModelType, ndarray_f32, ndarray_i32
from .base import InferenceModel
@@ -33,15 +33,27 @@ class BaseCLIPEncoder(InferenceModel):
def _load(self) -> None:
if self.mode == "text" or self.mode is None:
log.debug(f"Loading clip text model '{self.model_name}'")
self.text_model = self._make_session(self.textual_path)
self.text_model = ort.InferenceSession(
self.textual_path.as_posix(),
sess_options=self.sess_options,
providers=self.providers,
provider_options=self.provider_options,
)
log.debug(f"Loaded clip text model '{self.model_name}'")
if self.mode == "vision" or self.mode is None:
log.debug(f"Loading clip vision model '{self.model_name}'")
self.vision_model = self._make_session(self.visual_path)
self.vision_model = ort.InferenceSession(
self.visual_path.as_posix(),
sess_options=self.sess_options,
providers=self.providers,
provider_options=self.provider_options,
)
log.debug(f"Loaded clip vision model '{self.model_name}'")
def _predict(self, image_or_text: Image.Image | str) -> NDArray[np.float32]:
def _predict(self, image_or_text: Image.Image | str) -> ndarray_f32:
if isinstance(image_or_text, bytes):
image_or_text = Image.open(BytesIO(image_or_text))
@@ -49,10 +61,12 @@ class BaseCLIPEncoder(InferenceModel):
case Image.Image():
if self.mode == "text":
raise TypeError("Cannot encode image as text-only model")
outputs: NDArray[np.float32] = self.vision_model.run(None, self.transform(image_or_text))[0][0]
outputs: ndarray_f32 = self.vision_model.run(None, self.transform(image_or_text))[0][0]
case str():
if self.mode == "vision":
raise TypeError("Cannot encode text as vision-only model")
outputs = self.text_model.run(None, self.tokenize(image_or_text))[0][0]
case _:
raise TypeError(f"Expected Image or str, but got: {type(image_or_text)}")
@@ -60,11 +74,11 @@ class BaseCLIPEncoder(InferenceModel):
return outputs
@abstractmethod
def tokenize(self, text: str) -> dict[str, NDArray[np.int32]]:
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
pass
@abstractmethod
def transform(self, image: Image.Image) -> dict[str, NDArray[np.float32]]:
def transform(self, image: Image.Image) -> dict[str, ndarray_f32]:
pass
@property
@@ -148,9 +162,9 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
context_length = self.model_cfg["text_cfg"]["context_length"]
pad_token = self.tokenizer_cfg["pad_token"]
size = self.preprocess_cfg["size"]
self.size = size[0] if isinstance(size, list) else size
self.size = (
self.preprocess_cfg["size"][0] if type(self.preprocess_cfg["size"]) == list else self.preprocess_cfg["size"]
)
self.resampling = get_pil_resampling(self.preprocess_cfg["interpolation"])
self.mean = np.array(self.preprocess_cfg["mean"], dtype=np.float32)
self.std = np.array(self.preprocess_cfg["std"], dtype=np.float32)
@@ -162,11 +176,11 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
self.tokenizer.enable_truncation(max_length=context_length)
log.debug(f"Loaded tokenizer for CLIP model '{self.model_name}'")
def tokenize(self, text: str) -> dict[str, NDArray[np.int32]]:
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
tokens: Encoding = self.tokenizer.encode(text)
return {"text": np.array([tokens.ids], dtype=np.int32)}
def transform(self, image: Image.Image) -> dict[str, NDArray[np.float32]]:
def transform(self, image: Image.Image) -> dict[str, ndarray_f32]:
image = resize(image, self.size)
image = crop(image, self.size)
image_np = to_numpy(image)
@@ -175,7 +189,7 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
class MCLIPEncoder(OpenCLIPEncoder):
def tokenize(self, text: str) -> dict[str, NDArray[np.int32]]:
def tokenize(self, text: str) -> dict[str, ndarray_i32]:
tokens: Encoding = self.tokenizer.encode(text)
return {
"input_ids": np.array([tokens.ids], dtype=np.int32),

Some files were not shown because too many files have changed in this diff.