Compare commits


1 Commit

Author     SHA1        Message                    Date
Alex Tran  9f72d47942  fix: remote sync clean up  2025-11-02 21:16:20 +00:00
34 changed files with 388 additions and 294 deletions

View File

@@ -20,30 +20,6 @@ on:
required: true
ANDROID_STORE_PASSWORD:
required: true
APP_STORE_CONNECT_API_KEY_ID:
required: true
APP_STORE_CONNECT_API_KEY_ISSUER_ID:
required: true
APP_STORE_CONNECT_API_KEY:
required: true
IOS_CERTIFICATE_P12:
required: true
IOS_CERTIFICATE_PASSWORD:
required: true
IOS_PROVISIONING_PROFILE:
required: true
IOS_PROVISIONING_PROFILE_SHARE_EXTENSION:
required: true
IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION:
required: true
IOS_DEVELOPMENT_PROVISIONING_PROFILE:
required: true
IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION:
required: true
IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION:
required: true
FASTLANE_TEAM_ID:
required: true
pull_request:
push:
branches: [main]

View File

@@ -99,20 +99,6 @@ jobs:
ALIAS: ${{ secrets.ALIAS }}
ANDROID_KEY_PASSWORD: ${{ secrets.ANDROID_KEY_PASSWORD }}
ANDROID_STORE_PASSWORD: ${{ secrets.ANDROID_STORE_PASSWORD }}
# iOS secrets
APP_STORE_CONNECT_API_KEY_ID: ${{ secrets.APP_STORE_CONNECT_API_KEY_ID }}
APP_STORE_CONNECT_API_KEY_ISSUER_ID: ${{ secrets.APP_STORE_CONNECT_API_KEY_ISSUER_ID }}
APP_STORE_CONNECT_API_KEY: ${{ secrets.APP_STORE_CONNECT_API_KEY }}
IOS_CERTIFICATE_P12: ${{ secrets.IOS_CERTIFICATE_P12 }}
IOS_CERTIFICATE_PASSWORD: ${{ secrets.IOS_CERTIFICATE_PASSWORD }}
IOS_PROVISIONING_PROFILE: ${{ secrets.IOS_PROVISIONING_PROFILE }}
IOS_PROVISIONING_PROFILE_SHARE_EXTENSION: ${{ secrets.IOS_PROVISIONING_PROFILE_SHARE_EXTENSION }}
IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION: ${{ secrets.IOS_PROVISIONING_PROFILE_WIDGET_EXTENSION }}
IOS_DEVELOPMENT_PROVISIONING_PROFILE: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE }}
IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE_SHARE_EXTENSION }}
IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION: ${{ secrets.IOS_DEVELOPMENT_PROVISIONING_PROFILE_WIDGET_EXTENSION }}
FASTLANE_TEAM_ID: ${{ secrets.FASTLANE_TEAM_ID }}
with:
ref: ${{ needs.bump_version.outputs.ref }}
environment: production

View File

@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.100",
"version": "2.2.99",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",

View File

@@ -1,8 +1,4 @@
[
{
"label": "v2.2.2",
"url": "https://docs.v2.2.2.archive.immich.app"
},
{
"label": "v2.2.1",
"url": "https://docs.v2.2.1.archive.immich.app"

View File

@@ -1,6 +1,6 @@
{
"name": "immich-e2e",
"version": "2.2.2",
"version": "2.2.1",
"description": "",
"main": "index.js",
"type": "module",

View File

@@ -1140,16 +1140,6 @@ describe('/asset', () => {
},
},
},
{
input: 'metadata/gps-position/empty_gps.jpg',
expected: {
type: AssetTypeEnum.Image,
exifInfo: {
latitude: null,
longitude: null,
},
},
},
];
it.each(tests)(`should upload and generate a thumbnail for different file types`, async ({ input, expected }) => {

View File

@@ -1,10 +1,8 @@
from typing import Any
import cv2
import numpy as np
from numpy.typing import NDArray
from PIL import Image
from rapidocr.ch_ppocr_det.utils import DBPostProcess
from rapidocr.ch_ppocr_det import TextDetector as RapidTextDetector
from rapidocr.inference_engine.base import FileInfo, InferSession
from rapidocr.utils import DownloadFile, DownloadFileInput
from rapidocr.utils.typings import EngineType, LangDet, OCRVersion, TaskType
@@ -12,10 +10,11 @@ from rapidocr.utils.typings import ModelType as RapidModelType
from immich_ml.config import log
from immich_ml.models.base import InferenceModel
from immich_ml.models.transforms import decode_cv2
from immich_ml.schemas import ModelFormat, ModelSession, ModelTask, ModelType
from immich_ml.sessions.ort import OrtSession
from .schemas import TextDetectionOutput
from .schemas import OcrOptions, TextDetectionOutput
class TextDetector(InferenceModel):
@@ -25,20 +24,13 @@ class TextDetector(InferenceModel):
def __init__(self, model_name: str, **model_kwargs: Any) -> None:
super().__init__(model_name, **model_kwargs, model_format=ModelFormat.ONNX)
self.max_resolution = 736
self.mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
self.std_inv = np.float32(1.0) / (np.array([0.5, 0.5, 0.5], dtype=np.float32) * 255.0)
self.min_score = 0.5
self.score_mode = "fast"
self._empty: TextDetectionOutput = {
"image": np.empty(0, dtype=np.float32),
"boxes": np.empty(0, dtype=np.float32),
"scores": np.empty(0, dtype=np.float32),
}
self.postprocess = DBPostProcess(
thresh=0.3,
box_thresh=model_kwargs.get("minScore", 0.5),
max_candidates=1000,
unclip_ratio=1.6,
use_dilation=True,
score_mode="fast",
)
def _download(self) -> None:
model_info = InferSession.get_model_url(
@@ -60,65 +52,35 @@ class TextDetector(InferenceModel):
def _load(self) -> ModelSession:
# TODO: support other runtime sessions
return OrtSession(self.model_path)
session = OrtSession(self.model_path)
self.model = RapidTextDetector(
OcrOptions(
session=session.session,
limit_side_len=self.max_resolution,
limit_type="min",
box_thresh=self.min_score,
score_mode=self.score_mode,
)
)
return session
# partly adapted from RapidOCR
def _predict(self, inputs: Image.Image) -> TextDetectionOutput:
w, h = inputs.size
if w < 32 or h < 32:
return self._empty
out = self.session.run(None, {"x": self._transform(inputs)})[0]
boxes, scores = self.postprocess(out, (h, w))
if len(boxes) == 0:
def _predict(self, inputs: bytes | Image.Image) -> TextDetectionOutput:
results = self.model(decode_cv2(inputs))
if results.boxes is None or results.scores is None or results.img is None:
return self._empty
return {
"boxes": self.sorted_boxes(boxes),
"scores": np.array(scores, dtype=np.float32),
"image": results.img,
"boxes": np.array(results.boxes, dtype=np.float32),
"scores": np.array(results.scores, dtype=np.float32),
}
# adapted from RapidOCR
def _transform(self, img: Image.Image) -> NDArray[np.float32]:
if img.height < img.width:
ratio = float(self.max_resolution) / img.height
else:
ratio = float(self.max_resolution) / img.width
resize_h = int(img.height * ratio)
resize_w = int(img.width * ratio)
resize_h = int(round(resize_h / 32) * 32)
resize_w = int(round(resize_w / 32) * 32)
resized_img = img.resize((int(resize_w), int(resize_h)), resample=Image.Resampling.LANCZOS)
img_np: NDArray[np.float32] = cv2.cvtColor(np.array(resized_img, dtype=np.float32), cv2.COLOR_RGB2BGR) # type: ignore
img_np -= self.mean
img_np *= self.std_inv
img_np = np.transpose(img_np, (2, 0, 1))
return np.expand_dims(img_np, axis=0)
def sorted_boxes(self, dt_boxes: NDArray[np.float32]) -> NDArray[np.float32]:
if len(dt_boxes) == 0:
return dt_boxes
# Sort by y, then identify lines, then sort by (line, x)
y_order = np.argsort(dt_boxes[:, 0, 1], kind="stable")
sorted_y = dt_boxes[y_order, 0, 1]
line_ids = np.empty(len(dt_boxes), dtype=np.int32)
line_ids[0] = 0
np.cumsum(np.abs(np.diff(sorted_y)) >= 10, out=line_ids[1:])
# Create composite sort key for final ordering
# Shift line_ids by large factor, add x for tie-breaking
sort_key = line_ids[y_order] * 1e6 + dt_boxes[y_order, 0, 0]
final_order = np.argsort(sort_key, kind="stable")
sorted_boxes: NDArray[np.float32] = dt_boxes[y_order[final_order]]
return sorted_boxes
def configure(self, **kwargs: Any) -> None:
if (max_resolution := kwargs.get("maxResolution")) is not None:
self.max_resolution = max_resolution
self.model.limit_side_len = max_resolution
if (min_score := kwargs.get("minScore")) is not None:
self.postprocess.box_thresh = min_score
self.min_score = min_score
self.model.postprocess_op.box_thresh = min_score
if (score_mode := kwargs.get("scoreMode")) is not None:
self.postprocess.score_mode = score_mode
self.score_mode = score_mode
self.model.postprocess_op.score_mode = score_mode
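The two sides of this hunk trade a hand-rolled detection pipeline (resize to a multiple of 32, channel normalization, DBPostProcess, reading-order sort) for a thinner wrapper that delegates to RapidOCR's own TextDetector. The densest piece is the vectorized reading-order sort; below is a minimal standalone sketch of the same idea, run on hypothetical toy boxes rather than data from the commit:

```python
import numpy as np

def sorted_boxes(dt_boxes: np.ndarray) -> np.ndarray:
    """Order (N, 4, 2) quad boxes top-to-bottom, then left-to-right per line.

    Boxes whose first-corner y values differ by less than 10px are treated
    as the same text line, mirroring the sort shown in the hunk above.
    """
    if len(dt_boxes) == 0:
        return dt_boxes
    # Stable sort by the y coordinate of each box's first corner.
    y_order = np.argsort(dt_boxes[:, 0, 1], kind="stable")
    sorted_y = dt_boxes[y_order, 0, 1]
    # A new line starts wherever consecutive sorted y values jump by >= 10px.
    line_ids = np.empty(len(dt_boxes), dtype=np.int32)
    line_ids[0] = 0
    np.cumsum(np.abs(np.diff(sorted_y)) >= 10, out=line_ids[1:])
    # Composite key: line index dominates, x coordinate breaks ties.
    sort_key = line_ids * 1e6 + dt_boxes[y_order, 0, 0]
    final_order = np.argsort(sort_key, kind="stable")
    return dt_boxes[y_order[final_order]]

# Toy input: two boxes on one visual line, one box on a second line.
boxes = np.array(
    [[[200, 2], [260, 2], [260, 20], [200, 20]],
     [[10, 0], [80, 0], [80, 18], [10, 18]],
     [[15, 40], [90, 40], [90, 58], [15, 58]]],
    dtype=np.float32,
)
print(sorted_boxes(boxes)[:, 0])  # first corners come out in reading order
```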

View File

@@ -1,8 +1,9 @@
from typing import Any
import cv2
import numpy as np
from numpy.typing import NDArray
from PIL import Image
from PIL.Image import Image
from rapidocr.ch_ppocr_rec import TextRecInput
from rapidocr.ch_ppocr_rec import TextRecognizer as RapidTextRecognizer
from rapidocr.inference_engine.base import FileInfo, InferSession
@@ -13,7 +14,6 @@ from rapidocr.utils.vis_res import VisRes
from immich_ml.config import log, settings
from immich_ml.models.base import InferenceModel
from immich_ml.models.transforms import pil_to_cv2
from immich_ml.schemas import ModelFormat, ModelSession, ModelTask, ModelType
from immich_ml.sessions.ort import OrtSession
@@ -65,16 +65,17 @@ class TextRecognizer(InferenceModel):
)
return session
def _predict(self, img: Image.Image, texts: TextDetectionOutput) -> TextRecognitionOutput:
boxes, box_scores = texts["boxes"], texts["scores"]
def _predict(self, _: Image, texts: TextDetectionOutput) -> TextRecognitionOutput:
boxes, img, box_scores = texts["boxes"], texts["image"], texts["scores"]
if boxes.shape[0] == 0:
return self._empty
rec = self.model(TextRecInput(img=self.get_crop_img_list(img, boxes)))
if rec.txts is None:
return self._empty
boxes[:, :, 0] /= img.width
boxes[:, :, 1] /= img.height
height, width = img.shape[0:2]
boxes[:, :, 0] /= width
boxes[:, :, 1] /= height
text_scores = np.array(rec.scores)
valid_text_score_idx = text_scores > self.min_score
@@ -86,7 +87,7 @@ class TextRecognizer(InferenceModel):
"textScore": text_scores[valid_text_score_idx],
}
def get_crop_img_list(self, img: Image.Image, boxes: NDArray[np.float32]) -> list[NDArray[np.uint8]]:
def get_crop_img_list(self, img: NDArray[np.float32], boxes: NDArray[np.float32]) -> list[NDArray[np.float32]]:
img_crop_width = np.maximum(
np.linalg.norm(boxes[:, 1] - boxes[:, 0], axis=1), np.linalg.norm(boxes[:, 2] - boxes[:, 3], axis=1)
).astype(np.int32)
@@ -97,55 +98,22 @@ class TextRecognizer(InferenceModel):
pts_std[:, 1:3, 0] = img_crop_width[:, None]
pts_std[:, 2:4, 1] = img_crop_height[:, None]
img_crop_sizes = np.stack([img_crop_width, img_crop_height], axis=1)
all_coeffs = self._get_perspective_transform(pts_std, boxes)
imgs: list[NDArray[np.uint8]] = []
for coeffs, dst_size in zip(all_coeffs, img_crop_sizes):
dst_img = img.transform(
size=tuple(dst_size),
method=Image.Transform.PERSPECTIVE,
data=tuple(coeffs),
resample=Image.Resampling.BICUBIC,
)
dst_width, dst_height = dst_img.size
img_crop_sizes = np.stack([img_crop_width, img_crop_height], axis=1).tolist()
imgs: list[NDArray[np.float32]] = []
for box, pts_std, dst_size in zip(list(boxes), list(pts_std), img_crop_sizes):
M = cv2.getPerspectiveTransform(box, pts_std)
dst_img: NDArray[np.float32] = cv2.warpPerspective(
img,
M,
dst_size,
borderMode=cv2.BORDER_REPLICATE,
flags=cv2.INTER_CUBIC,
) # type: ignore
dst_height, dst_width = dst_img.shape[0:2]
if dst_height * 1.0 / dst_width >= 1.5:
dst_img = dst_img.rotate(90, expand=True)
imgs.append(pil_to_cv2(dst_img))
dst_img = np.rot90(dst_img)
imgs.append(dst_img)
return imgs
def _get_perspective_transform(self, src: NDArray[np.float32], dst: NDArray[np.float32]) -> NDArray[np.float32]:
N = src.shape[0]
x, y = src[:, :, 0], src[:, :, 1]
u, v = dst[:, :, 0], dst[:, :, 1]
A = np.zeros((N, 8, 9), dtype=np.float32)
# Fill even rows (0, 2, 4, 6): [x, y, 1, 0, 0, 0, -u*x, -u*y, -u]
A[:, ::2, 0] = x
A[:, ::2, 1] = y
A[:, ::2, 2] = 1
A[:, ::2, 6] = -u * x
A[:, ::2, 7] = -u * y
A[:, ::2, 8] = -u
# Fill odd rows (1, 3, 5, 7): [0, 0, 0, x, y, 1, -v*x, -v*y, -v]
A[:, 1::2, 3] = x
A[:, 1::2, 4] = y
A[:, 1::2, 5] = 1
A[:, 1::2, 6] = -v * x
A[:, 1::2, 7] = -v * y
A[:, 1::2, 8] = -v
# Solve using SVD for all matrices at once
_, _, Vt = np.linalg.svd(A)
H = Vt[:, -1, :].reshape(N, 3, 3)
H = H / H[:, 2:3, 2:3]
# Extract the 8 coefficients for each transformation
return np.column_stack(
[H[:, 0, 0], H[:, 0, 1], H[:, 0, 2], H[:, 1, 0], H[:, 1, 1], H[:, 1, 2], H[:, 2, 0], H[:, 2, 1]]
) # pyright: ignore[reportReturnType]
def configure(self, **kwargs: Any) -> None:
self.min_score = kwargs.get("minScore", self.min_score)
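Both versions of get_crop_img_list above compute the same rectifying warp per detected box: one asks OpenCV for cv2.getPerspectiveTransform box by box, the other batches the direct linear transform (DLT) and solves every 8-coefficient homography at once with an SVD. A small sketch, assuming 4-point float32 quads, checking that the two routes agree on a single box:

```python
import cv2
import numpy as np

def dlt_homography(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    """Solve the 3x3 homography mapping 4 src points onto 4 dst points via SVD."""
    x, y = src[:, 0], src[:, 1]
    u, v = dst[:, 0], dst[:, 1]
    A = np.zeros((8, 9), dtype=np.float64)
    # Even rows: [x, y, 1, 0, 0, 0, -u*x, -u*y, -u]
    A[::2, 0], A[::2, 1], A[::2, 2] = x, y, 1
    A[::2, 6], A[::2, 7], A[::2, 8] = -u * x, -u * y, -u
    # Odd rows: [0, 0, 0, x, y, 1, -v*x, -v*y, -v]
    A[1::2, 3], A[1::2, 4], A[1::2, 5] = x, y, 1
    A[1::2, 6], A[1::2, 7], A[1::2, 8] = -v * x, -v * y, -v
    # The homography is the null vector of A (last row of V^T from the SVD).
    _, _, Vt = np.linalg.svd(A)
    H = Vt[-1].reshape(3, 3)
    return H / H[2, 2]  # normalize so H[2, 2] == 1, like OpenCV

quad = np.array([[12, 7], [118, 11], [115, 42], [10, 39]], dtype=np.float32)
rect = np.array([[0, 0], [106, 0], [106, 32], [0, 32]], dtype=np.float32)
print(np.allclose(dlt_homography(quad, rect),
                  cv2.getPerspectiveTransform(quad, rect), atol=1e-6))  # True
```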

View File

@@ -7,6 +7,7 @@ from typing_extensions import TypedDict
class TextDetectionOutput(TypedDict):
image: npt.NDArray[np.float32]
boxes: npt.NDArray[np.float32]
scores: npt.NDArray[np.float32]

View File

@@ -1,6 +1,6 @@
[project]
name = "immich-ml"
version = "2.2.2"
version = "2.2.1"
description = ""
authors = [{ name = "Hau Tran", email = "alex.tran1502@gmail.com" }]
requires-python = ">=3.10,<4.0"

View File

@@ -43,8 +43,8 @@ class BackgroundEngineLock(context: Context) : BackgroundWorkerLockApi, ImmichPl
override fun onAttachedToEngine(binding: FlutterPlugin.FlutterPluginBinding) {
super.onAttachedToEngine(binding)
engineCount.incrementAndGet()
checkAndEnforceBackgroundLock(binding.applicationContext)
engineCount.incrementAndGet()
Log.i(TAG, "Flutter engine attached. Attached Engines count: $engineCount")
}

View File

@@ -295,12 +295,12 @@ class BackgroundWorkerFlutterApi(private val binaryMessenger: BinaryMessenger, p
}
}
}
fun onAndroidUpload(maxMinutesArg: Long?, callback: (Result<Unit>) -> Unit)
fun onAndroidUpload(callback: (Result<Unit>) -> Unit)
{
val separatedMessageChannelSuffix = if (messageChannelSuffix.isNotEmpty()) ".$messageChannelSuffix" else ""
val channelName = "dev.flutter.pigeon.immich_mobile.BackgroundWorkerFlutterApi.onAndroidUpload$separatedMessageChannelSuffix"
val channel = BasicMessageChannel<Any?>(binaryMessenger, channelName, codec)
channel.send(listOf(maxMinutesArg)) {
channel.send(null) {
if (it is List<*>) {
if (it.size > 1) {
callback(Result.failure(FlutterError(it[0] as String, it[1] as String, it[2] as String?)))

View File

@@ -107,7 +107,7 @@ class BackgroundWorker(context: Context, params: WorkerParameters) :
* This method acts as a bridge between the native Android background task system and Flutter.
*/
override fun onInitialized() {
flutterApi?.onAndroidUpload(maxMinutesArg = 20) { handleHostResult(it) }
flutterApi?.onAndroidUpload { handleHostResult(it) }
}
// TODO: Move this to a separate NotificationManager class

View File

@@ -5,10 +5,8 @@ import android.provider.MediaStore
import android.util.Log
import androidx.work.BackoffPolicy
import androidx.work.Constraints
import androidx.work.ExistingPeriodicWorkPolicy
import androidx.work.ExistingWorkPolicy
import androidx.work.OneTimeWorkRequestBuilder
import androidx.work.PeriodicWorkRequestBuilder
import androidx.work.OneTimeWorkRequest
import androidx.work.WorkManager
import io.flutter.embedding.engine.FlutterEngineCache
import java.util.concurrent.TimeUnit
@@ -20,7 +18,6 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
override fun enable() {
enqueueMediaObserver(ctx)
enqueuePeriodicWorker(ctx)
}
override fun saveNotificationMessage(title: String, body: String) {
@@ -30,14 +27,12 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
override fun configure(settings: BackgroundWorkerSettings) {
BackgroundWorkerPreferences(ctx).updateSettings(settings)
enqueueMediaObserver(ctx)
enqueuePeriodicWorker(ctx)
}
override fun disable() {
WorkManager.getInstance(ctx).apply {
cancelUniqueWork(OBSERVER_WORKER_NAME)
cancelUniqueWork(BACKGROUND_WORKER_NAME)
cancelUniqueWork(PERIODIC_WORKER_NAME)
}
Log.i(TAG, "Cancelled background upload tasks")
}
@@ -45,7 +40,6 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
companion object {
private const val BACKGROUND_WORKER_NAME = "immich/BackgroundWorkerV1"
private const val OBSERVER_WORKER_NAME = "immich/MediaObserverV1"
private const val PERIODIC_WORKER_NAME = "immich/PeriodicBackgroundWorkerV1"
const val ENGINE_CACHE_KEY = "immich::background_worker::engine"
@@ -61,7 +55,7 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
setRequiresCharging(settings.requiresCharging)
}.build()
val work = OneTimeWorkRequestBuilder<MediaObserver>()
val work = OneTimeWorkRequest.Builder(MediaObserver::class.java)
.setConstraints(constraints)
.build()
WorkManager.getInstance(ctx)
@@ -73,30 +67,10 @@ class BackgroundWorkerApiImpl(context: Context) : BackgroundWorkerFgHostApi {
)
}
fun enqueuePeriodicWorker(ctx: Context) {
val settings = BackgroundWorkerPreferences(ctx).getSettings()
val constraints = Constraints.Builder().apply {
setRequiresCharging(settings.requiresCharging)
}.build()
val work =
PeriodicWorkRequestBuilder<PeriodicWorker>(
1,
TimeUnit.HOURS,
15,
TimeUnit.MINUTES
).setConstraints(constraints)
.build()
WorkManager.getInstance(ctx)
.enqueueUniquePeriodicWork(PERIODIC_WORKER_NAME, ExistingPeriodicWorkPolicy.UPDATE, work)
Log.i(TAG, "Enqueued periodic background worker with name: $PERIODIC_WORKER_NAME")
}
fun enqueueBackgroundWorker(ctx: Context) {
val constraints = Constraints.Builder().setRequiresBatteryNotLow(true).build()
val work = OneTimeWorkRequestBuilder<BackgroundWorker>()
val work = OneTimeWorkRequest.Builder(BackgroundWorker::class.java)
.setConstraints(constraints)
.setBackoffCriteria(BackoffPolicy.EXPONENTIAL, 1, TimeUnit.MINUTES)
.build()

View File

@@ -1,16 +0,0 @@
package app.alextran.immich.background
import android.content.Context
import android.util.Log
import androidx.work.Worker
import androidx.work.WorkerParameters
class PeriodicWorker(context: Context, params: WorkerParameters) : Worker(context, params) {
private val ctx: Context = context.applicationContext
override fun doWork(): Result {
Log.i("PeriodicWorker", "Periodic worker triggered, starting background worker")
BackgroundWorkerApiImpl.enqueueBackgroundWorker(ctx)
return Result.success()
}
}

View File

@@ -35,8 +35,8 @@ platform :android do
task: 'bundle',
build_type: 'Release',
properties: {
"android.injected.version.code" => 3025,
"android.injected.version.name" => "2.2.2",
"android.injected.version.code" => 3024,
"android.injected.version.name" => "2.2.1",
}
)
upload_to_play_store(skip_upload_apk: true, skip_upload_images: true, skip_upload_screenshots: true, aab: '../build/app/outputs/bundle/release/app-release.aab')

View File

@@ -295,7 +295,7 @@ class BackgroundWorkerBgHostApiSetup {
/// Generated protocol from Pigeon that represents Flutter messages that can be called from Swift.
protocol BackgroundWorkerFlutterApiProtocol {
func onIosUpload(isRefresh isRefreshArg: Bool, maxSeconds maxSecondsArg: Int64?, completion: @escaping (Result<Void, PigeonError>) -> Void)
func onAndroidUpload(maxMinutes maxMinutesArg: Int64?, completion: @escaping (Result<Void, PigeonError>) -> Void)
func onAndroidUpload(completion: @escaping (Result<Void, PigeonError>) -> Void)
func cancel(completion: @escaping (Result<Void, PigeonError>) -> Void)
}
class BackgroundWorkerFlutterApi: BackgroundWorkerFlutterApiProtocol {
@@ -326,10 +326,10 @@ class BackgroundWorkerFlutterApi: BackgroundWorkerFlutterApiProtocol {
}
}
}
func onAndroidUpload(maxMinutes maxMinutesArg: Int64?, completion: @escaping (Result<Void, PigeonError>) -> Void) {
func onAndroidUpload(completion: @escaping (Result<Void, PigeonError>) -> Void) {
let channelName: String = "dev.flutter.pigeon.immich_mobile.BackgroundWorkerFlutterApi.onAndroidUpload\(messageChannelSuffix)"
let channel = FlutterBasicMessageChannel(name: channelName, binaryMessenger: binaryMessenger, codec: codec)
channel.sendMessage([maxMinutesArg] as [Any?]) { response in
channel.sendMessage(nil) { response in
guard let listResponse = response as? [Any?] else {
completion(.failure(createConnectionError(withChannelName: channelName)))
return

View File

@@ -169,7 +169,7 @@ platform :ios do
targets: ["Runner", "ShareExtension", "WidgetExtension"]
)
increment_version_number(
version_number: "2.2.2"
version_number: "2.2.1"
)
increment_build_number(
build_number: latest_testflight_build_number + 1,

View File

@@ -122,54 +122,46 @@ class BackgroundWorkerBgService extends BackgroundWorkerFlutterApi {
}
@override
Future<void> onAndroidUpload(int? maxMinutes) async {
final hashTimeout = Duration(minutes: _isBackupEnabled ? 3 : 6);
final backupTimeout = maxMinutes != null ? Duration(minutes: maxMinutes - 1) : null;
return _backgroundLoop(
hashTimeout: hashTimeout,
backupTimeout: backupTimeout,
debugLabel: 'Android background upload',
);
Future<void> onAndroidUpload() async {
_logger.info('Android background processing started');
final sw = Stopwatch()..start();
try {
if (!await _syncAssets(hashTimeout: Duration(minutes: _isBackupEnabled ? 3 : 6))) {
_logger.warning("Remote sync did not complete successfully, skipping backup");
return;
}
await _handleBackup();
} catch (error, stack) {
_logger.severe("Failed to complete Android background processing", error, stack);
} finally {
sw.stop();
_logger.info("Android background processing completed in ${sw.elapsed.inSeconds}s");
await _cleanup();
}
}
@override
Future<void> onIosUpload(bool isRefresh, int? maxSeconds) async {
final hashTimeout = isRefresh ? const Duration(seconds: 5) : Duration(minutes: _isBackupEnabled ? 3 : 6);
final backupTimeout = maxSeconds != null ? Duration(seconds: maxSeconds - 1) : null;
return _backgroundLoop(hashTimeout: hashTimeout, backupTimeout: backupTimeout, debugLabel: 'iOS background upload');
}
Future<void> _backgroundLoop({
required Duration hashTimeout,
required Duration? backupTimeout,
required String debugLabel,
}) async {
_logger.info(
'$debugLabel started hashTimeout: ${hashTimeout.inSeconds}s, backupTimeout: ${backupTimeout?.inMinutes ?? '~'}m',
);
_logger.info('iOS background upload started with maxSeconds: ${maxSeconds}s');
final sw = Stopwatch()..start();
try {
if (!await _syncAssets(hashTimeout: hashTimeout)) {
final timeout = isRefresh ? const Duration(seconds: 5) : Duration(minutes: _isBackupEnabled ? 3 : 6);
if (!await _syncAssets(hashTimeout: timeout)) {
_logger.warning("Remote sync did not complete successfully, skipping backup");
return;
}
final backupFuture = _handleBackup();
if (backupTimeout != null) {
await backupFuture.timeout(
backupTimeout,
onTimeout: () {
_cancellationToken.cancel();
},
);
if (maxSeconds != null) {
await backupFuture.timeout(Duration(seconds: maxSeconds - 1), onTimeout: () {});
} else {
await backupFuture;
}
} catch (error, stack) {
_logger.severe("Failed to complete $debugLabel", error, stack);
_logger.severe("Failed to complete iOS background upload", error, stack);
} finally {
sw.stop();
_logger.info("$debugLabel completed in ${sw.elapsed.inSeconds}s");
_logger.info("iOS background upload completed in ${sw.elapsed.inSeconds}s");
await _cleanup();
}
}
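One side of this hunk folds the Android and iOS entry points into a shared _backgroundLoop that cancels a token when the backup deadline passes, rather than letting the timed-out future keep running unattended. A rough asyncio sketch of that shape; sync_assets, handle_backup, and CancellationToken are hypothetical stand-ins for illustration, not Immich APIs:

```python
import asyncio

class CancellationToken:
    """Minimal cooperative-cancellation flag, polled by long-running work."""
    def __init__(self) -> None:
        self.cancelled = False

    def cancel(self) -> None:
        self.cancelled = True

async def sync_assets(hash_timeout: float) -> bool:
    await asyncio.sleep(0.01)  # placeholder for the remote sync step
    return True

async def handle_backup(token: CancellationToken) -> None:
    while not token.cancelled:  # placeholder for the per-asset upload loop
        await asyncio.sleep(0.01)

async def background_loop(hash_timeout: float, backup_timeout: float | None) -> None:
    if not await sync_assets(hash_timeout):
        return  # skip backup when the sync did not complete
    token = CancellationToken()
    backup = asyncio.ensure_future(handle_backup(token))
    try:
        # shield() keeps the backup task alive when the timeout fires.
        await asyncio.wait_for(asyncio.shield(backup), backup_timeout)
    except asyncio.TimeoutError:
        token.cancel()  # ask the backup to stop at its next safe point
        await backup    # let it unwind instead of abandoning it mid-upload

asyncio.run(background_loop(hash_timeout=3.0, backup_timeout=0.05))
```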

View File

@@ -132,8 +132,7 @@ class SyncStreamService {
return;
// SyncCompleteV1 is used to signal the completion of the sync process. Cleanup stale assets and signal completion
case SyncEntityType.syncCompleteV1:
return;
// return _syncStreamRepository.pruneAssets();
return _syncStreamRepository.pruneAssets();
// Request to reset the client state. Clear everything related to remote entities
case SyncEntityType.syncResetV1:
return _syncStreamRepository.reset();

View File

@@ -612,12 +612,15 @@ class SyncStreamRepository extends DriftDatabaseRepository {
final validUsers = {currentUserId, ...partnerIds.nonNulls};
// Asset is not owned by the current user or any of their partners and is not part of any (shared) album
// Asset is not owned by the current user or any of their partners and is not part of any (shared) album or memory
// Likely a stale asset that was previously shared but has been removed
await _db.remoteAssetEntity.deleteWhere((asset) {
return asset.ownerId.isNotIn(validUsers) &
asset.id.isNotInQuery(
_db.remoteAlbumAssetEntity.selectOnly()..addColumns([_db.remoteAlbumAssetEntity.assetId]),
) &
asset.id.isNotInQuery(
_db.memoryAssetEntity.selectOnly()..addColumns([_db.memoryAssetEntity.assetId]),
);
});
});
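The added clause extends the prune predicate so that memory membership, like album membership, protects an asset whose owner is neither the current user nor a partner. In plain SQL the drift query amounts to a triple anti-join; a sqlite3 sketch with abbreviated stand-in tables (not Immich's actual schema):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE remote_asset (id TEXT PRIMARY KEY, owner_id TEXT);
    CREATE TABLE remote_album_asset (asset_id TEXT);
    CREATE TABLE memory_asset (asset_id TEXT);
    INSERT INTO remote_asset VALUES
        ('a-orphan', 'stranger'),  -- no album, no memory: prune
        ('a-memory', 'stranger'),  -- referenced by a memory: keep
        ('a-mine',   'user-1');    -- owned by a valid user: keep
    INSERT INTO memory_asset VALUES ('a-memory');
    """
)
valid_users = ("user-1", "partner-1")
# Same shape as the deleteWhere above: delete assets whose owner is not a
# valid user AND that appear in no album AND in no memory.
conn.execute(
    f"""
    DELETE FROM remote_asset
    WHERE owner_id NOT IN ({",".join("?" * len(valid_users))})
      AND id NOT IN (SELECT asset_id FROM remote_album_asset)
      AND id NOT IN (SELECT asset_id FROM memory_asset)
    """,
    valid_users,
)
print([row[0] for row in conn.execute("SELECT id FROM remote_asset")])
# ['a-memory', 'a-mine']
```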

View File

@@ -273,7 +273,7 @@ abstract class BackgroundWorkerFlutterApi {
Future<void> onIosUpload(bool isRefresh, int? maxSeconds);
Future<void> onAndroidUpload(int? maxMinutes);
Future<void> onAndroidUpload();
Future<void> cancel();
@@ -327,14 +327,8 @@ abstract class BackgroundWorkerFlutterApi {
pigeonVar_channel.setMessageHandler(null);
} else {
pigeonVar_channel.setMessageHandler((Object? message) async {
assert(
message != null,
'Argument for dev.flutter.pigeon.immich_mobile.BackgroundWorkerFlutterApi.onAndroidUpload was null.',
);
final List<Object?> args = (message as List<Object?>?)!;
final int? arg_maxMinutes = (args[0] as int?);
try {
await api.onAndroidUpload(arg_maxMinutes);
await api.onAndroidUpload();
return wrapResponse(empty: true);
} on PlatformException catch (e) {
return wrapResponse(error: e);

View File

@@ -3,7 +3,7 @@ Immich API
This Dart package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
- API version: 2.2.2
- API version: 2.2.1
- Generator version: 7.8.0
- Build package: org.openapitools.codegen.languages.DartClientCodegen

View File

@@ -47,7 +47,7 @@ abstract class BackgroundWorkerFlutterApi {
// Android Only: Called when the Android background upload is triggered
@async
void onAndroidUpload(int? maxMinutes);
void onAndroidUpload();
@async
void cancel();

View File

@@ -2,7 +2,7 @@ name: immich_mobile
description: Immich - selfhosted backup media file on mobile phone
publish_to: 'none'
version: 2.2.2+3025
version: 2.2.1+3024
environment:
sdk: '>=3.8.0 <4.0.0'

View File

@@ -0,0 +1,266 @@
import 'package:drift/native.dart';
import 'package:flutter_test/flutter_test.dart';
import 'package:immich_mobile/infrastructure/repositories/db.repository.dart';
import 'package:immich_mobile/infrastructure/repositories/sync_stream.repository.dart';
import 'package:openapi/api.dart';
/// This test reproduces the bug where pruneAssets() deletes assets that are part of memories,
/// causing foreign key constraint failures when trying to insert memory-asset relationships.
void main() {
late DbRepository db;
late SyncStreamRepository sut;
setUp(() async {
db = DbRepository(NativeDatabase.memory());
sut = SyncStreamRepository(db);
// Set up test data: Create a user and a partner
await sut.updateAuthUsersV1([
SyncAuthUserV1(
email: 'current-user@test.com',
id: 'user-1',
isAdmin: false,
name: 'Current User',
avatarColor: null,
hasProfileImage: false,
profileChangedAt: DateTime(2025),
),
]);
await sut.updateUsersV1([
SyncUserV1(
deletedAt: null,
email: 'partner@test.com',
id: 'partner-1',
name: 'Partner User',
avatarColor: null,
hasProfileImage: false,
profileChangedAt: DateTime(2025),
),
]);
await sut.updatePartnerV1([
SyncPartnerV1(
inTimeline: true,
sharedById: 'partner-1',
sharedWithId: 'user-1',
),
]);
});
tearDown(() async {
await db.close();
});
group('pruneAssets - Memory Asset Bug', () {
test('BEFORE FIX: pruneAssets() should NOT delete assets that are part of memories', () async {
// Step 1: Create an asset owned by someone else (not current user or partner)
await sut.updateAssetsV1([
SyncAssetV1(
checksum: 'checksum-1'.codeUnits,
deletedAt: null,
deviceAssetId: 'device-1',
deviceId: 'device-1',
duplicateId: null,
duration: null,
fileCreatedAt: DateTime(2025, 1, 1),
fileModifiedAt: DateTime(2025, 1, 1),
id: 'asset-shared-memory',
isArchived: false,
isFavorite: false,
isOffline: false,
isTrashed: false,
libraryId: null,
livePhotoVideoId: null,
localDateTime: DateTime(2025, 1, 1),
originalFileName: 'shared-memory.jpg',
// Asset owned by someone else - should be pruned if not in album/memory
ownerId: 'other-user-not-partner',
resized: true,
stackId: null,
thumbhash: null,
type: AssetTypeEnum.IMAGE,
updatedAt: DateTime(2025, 1, 1),
visibility: AssetVisibility.public_,
),
]);
// Step 2: Create a memory owned by current user
await sut.updateMemoriesV1([
SyncMemoryV1(
createdAt: DateTime(2025, 1, 1),
data: {'year': 2025, 'title': 'Test Memory'},
deletedAt: null,
hideAt: null,
id: 'memory-1',
isSaved: false,
memoryAt: DateTime(2025, 1, 1),
ownerId: 'user-1',
seenAt: null,
showAt: DateTime(2025, 1, 1),
type: MemoryType.onThisDay,
updatedAt: DateTime(2025, 1, 1),
),
]);
// Step 3: Link the shared asset to the memory
await sut.updateMemoryAssetsV1([
SyncMemoryAssetV1(
assetId: 'asset-shared-memory',
memoryId: 'memory-1',
),
]);
// Verify the asset and memory-asset relationship exist
final assetsBefore = await db.remoteAssetEntity.select().get();
final memoryAssetsBefore = await db.memoryAssetEntity.select().get();
expect(assetsBefore.length, 1);
expect(assetsBefore.first.id, 'asset-shared-memory');
expect(memoryAssetsBefore.length, 1);
// Step 4: Call pruneAssets() - This is where the bug happens
await sut.pruneAssets();
// Step 5: Verify the asset is NOT deleted (because it's in a memory)
final assetsAfter = await db.remoteAssetEntity.select().get();
expect(
assetsAfter.length,
1,
reason: 'Asset should NOT be pruned because it is part of a memory',
);
expect(assetsAfter.first.id, 'asset-shared-memory');
// Step 6: Verify we can still work with memory-asset relationships
// This simulates receiving more sync events after pruning
await expectLater(
sut.updateMemoryAssetsV1([
SyncMemoryAssetV1(
assetId: 'asset-shared-memory',
memoryId: 'memory-1',
),
]),
completes,
reason: 'Should not throw foreign key constraint error',
);
});
test('pruneAssets() SHOULD delete assets not in albums or memories', () async {
// Step 1: Create an asset that's truly orphaned (not in album or memory)
await sut.updateAssetsV1([
SyncAssetV1(
checksum: 'checksum-2'.codeUnits,
deletedAt: null,
deviceAssetId: 'device-2',
deviceId: 'device-2',
duplicateId: null,
duration: null,
fileCreatedAt: DateTime(2025, 1, 1),
fileModifiedAt: DateTime(2025, 1, 1),
id: 'asset-orphaned',
isArchived: false,
isFavorite: false,
isOffline: false,
isTrashed: false,
libraryId: null,
livePhotoVideoId: null,
localDateTime: DateTime(2025, 1, 1),
originalFileName: 'orphaned.jpg',
ownerId: 'other-user-not-partner',
resized: true,
stackId: null,
thumbhash: null,
type: AssetTypeEnum.IMAGE,
updatedAt: DateTime(2025, 1, 1),
visibility: AssetVisibility.public_,
),
]);
// Verify the asset exists
final assetsBefore = await db.remoteAssetEntity.select().get();
expect(assetsBefore.length, 1);
// Call pruneAssets()
await sut.pruneAssets();
// Verify the orphaned asset IS deleted
final assetsAfter = await db.remoteAssetEntity.select().get();
expect(
assetsAfter.length,
0,
reason: 'Orphaned asset should be pruned',
);
});
test('pruneAssets() should NOT delete assets in albums', () async {
// Step 1: Create an asset and an album
await sut.updateAssetsV1([
SyncAssetV1(
checksum: 'checksum-3'.codeUnits,
deletedAt: null,
deviceAssetId: 'device-3',
deviceId: 'device-3',
duplicateId: null,
duration: null,
fileCreatedAt: DateTime(2025, 1, 1),
fileModifiedAt: DateTime(2025, 1, 1),
id: 'asset-in-album',
isArchived: false,
isFavorite: false,
isOffline: false,
isTrashed: false,
libraryId: null,
livePhotoVideoId: null,
localDateTime: DateTime(2025, 1, 1),
originalFileName: 'in-album.jpg',
ownerId: 'other-user-not-partner',
resized: true,
stackId: null,
thumbhash: null,
type: AssetTypeEnum.IMAGE,
updatedAt: DateTime(2025, 1, 1),
visibility: AssetVisibility.public_,
),
]);
await sut.updateAlbumsV1([
SyncAlbumV1(
albumName: 'Test Album',
albumThumbnailAssetId: null,
createdAt: DateTime(2025, 1, 1),
deletedAt: null,
description: 'Test',
id: 'album-1',
isActivityEnabled: false,
lastModifiedAssetTimestamp: DateTime(2025, 1, 1),
order: AlbumUserRole.editor,
ownerId: 'user-1',
startDate: DateTime(2025, 1, 1),
endDate: DateTime(2025, 1, 2),
updatedAt: DateTime(2025, 1, 1),
),
]);
await sut.updateAlbumToAssetsV1([
SyncAlbumToAssetV1(
albumId: 'album-1',
assetId: 'asset-in-album',
),
]);
// Verify setup
final assetsBefore = await db.remoteAssetEntity.select().get();
expect(assetsBefore.length, 1);
// Call pruneAssets()
await sut.pruneAssets();
// Verify asset is NOT deleted (protected by album membership)
final assetsAfter = await db.remoteAssetEntity.select().get();
expect(
assetsAfter.length,
1,
reason: 'Asset should NOT be pruned because it is in an album',
);
});
});
}

View File

@@ -10006,7 +10006,7 @@
"info": {
"title": "Immich",
"description": "Immich API",
"version": "2.2.2",
"version": "2.2.1",
"contact": {}
},
"tags": [],

View File

@@ -1,6 +1,6 @@
{
"name": "@immich/sdk",
"version": "2.2.2",
"version": "2.2.1",
"description": "Auto-generated TypeScript SDK for the Immich API",
"type": "module",
"main": "./build/index.js",

View File

@@ -1,6 +1,6 @@
/**
* Immich
* 2.2.2
* 2.2.1
* DO NOT MODIFY - This file has been generated using oazapfts.
* See https://www.npmjs.com/package/oazapfts
*/

View File

@@ -1,6 +1,6 @@
{
"name": "immich",
"version": "2.2.2",
"version": "2.2.1",
"description": "",
"author": "",
"private": true,

View File

@@ -236,8 +236,8 @@ export class MetadataService extends BaseService {
latitude: number | null = null,
longitude: number | null = null;
if (this.hasGeo(exifTags)) {
latitude = Number(exifTags.GPSLatitude);
longitude = Number(exifTags.GPSLongitude);
latitude = exifTags.GPSLatitude;
longitude = exifTags.GPSLongitude;
if (reverseGeocoding.enabled) {
geo = await this.mapRepository.reverseGeocode({ latitude, longitude });
}
@@ -894,10 +894,12 @@ export class MetadataService extends BaseService {
};
}
private hasGeo(tags: ImmichTags) {
const lat = Number(tags.GPSLatitude);
const lng = Number(tags.GPSLongitude);
return !Number.isNaN(lat) && !Number.isNaN(lng) && (lat !== 0 || lng !== 0);
private hasGeo(tags: ImmichTags): tags is ImmichTags & { GPSLatitude: number; GPSLongitude: number } {
return (
tags.GPSLatitude !== undefined &&
tags.GPSLongitude !== undefined &&
(tags.GPSLatitude !== 0 || tags.GPSLongitude !== 0)
);
}
private getAutoStackId(tags: ImmichTags | null): string | null {
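One side of this hunk validates coordinates by coercing with Number() and rejecting NaN; the other narrows the tag type with a TypeScript type guard and rejects only the (0, 0) "null island" pair. The same narrowing pattern exists in Python as typing.TypeGuard; a small sketch of the equivalent check (the Tags shape here is a stand-in, not Immich's ImmichTags):

```python
from typing import TypedDict, TypeGuard

class Tags(TypedDict, total=False):
    GPSLatitude: float
    GPSLongitude: float

class GeoTags(TypedDict):
    GPSLatitude: float
    GPSLongitude: float

def has_geo(tags: Tags) -> TypeGuard[GeoTags]:
    """True when both coordinates exist and are not both zero (null island)."""
    lat = tags.get("GPSLatitude")
    lng = tags.get("GPSLongitude")
    return lat is not None and lng is not None and (lat != 0 or lng != 0)

tags: Tags = {"GPSLatitude": 48.85, "GPSLongitude": 2.35}
if has_geo(tags):
    # Inside this branch a type checker treats both keys as present,
    # mirroring the `tags is ImmichTags & {...}` guard in the hunk above.
    print(tags["GPSLatitude"], tags["GPSLongitude"])
```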

View File

@@ -1,6 +1,6 @@
{
"name": "immich-web",
"version": "2.2.2",
"version": "2.2.1",
"license": "GNU Affero General Public License version 3",
"type": "module",
"scripts": {

View File

@@ -30,10 +30,10 @@
let showSuggestions = $state(false);
let isSearchSuggestions = $state(false);
let selectedId: string | undefined = $state();
let isFocus = $state(false);
let close: (() => Promise<void>) | undefined;
const listboxId = generateId();
const searchTypeId = generateId();
onDestroy(() => {
searchStore.isSearchEnabled = false;
@@ -161,10 +161,12 @@
const openDropdown = () => {
showSuggestions = true;
isFocus = true;
};
const closeDropdown = () => {
showSuggestions = false;
isFocus = false;
searchHistoryBox?.clearSelection();
};
@@ -249,7 +251,6 @@
aria-activedescendant={selectedId ?? ''}
aria-expanded={showSuggestions && isSearchSuggestions}
aria-autocomplete="list"
aria-describedby={searchTypeId}
use:shortcuts={[
{ shortcut: { key: 'Escape' }, onShortcut: onEscape },
{ shortcut: { ctrl: true, shift: true, key: 'k' }, onShortcut: onFilterClick },
@@ -286,12 +287,12 @@
/>
</div>
{#if searchStore.isSearchEnabled}
{#if isFocus}
<div
id={searchTypeId}
class="absolute inset-y-0 flex items-center end-16"
class="absolute inset-y-0 flex items-center"
class:max-md:hidden={value}
class:end-28={value.length > 0}
class:end-16={isFocus}
class:end-28={isFocus && value.length > 0}
>
<p
class="bg-immich-primary text-white dark:bg-immich-dark-primary/90 dark:text-black/75 rounded-full px-3 py-1 text-xs"