diff --git a/fastanime/cli/commands/anilist/cmd.py b/fastanime/cli/commands/anilist/cmd.py
index f5dfc40..57e22a1 100644
--- a/fastanime/cli/commands/anilist/cmd.py
+++ b/fastanime/cli/commands/anilist/cmd.py
@@ -10,6 +10,7 @@ commands = {
"download": "download.download",
# "downloads": "downloads.downloads",
"auth": "auth.auth",
+ "stats": "stats.stats",
}
diff --git a/fastanime/cli/commands/anilist/commands/download.py b/fastanime/cli/commands/anilist/commands/download.py
index 391b763..30dcdd2 100644
--- a/fastanime/cli/commands/anilist/commands/download.py
+++ b/fastanime/cli/commands/anilist/commands/download.py
@@ -68,10 +68,10 @@ if TYPE_CHECKING:
epilog=examples.download,
)
@click.option(
- "--title",
- "-t",
+ "--title",
+ "-t",
shell_complete=anime_titles_shell_complete,
- help="Title of the anime to search for"
+ help="Title of the anime to search for",
)
@click.option(
"--episode-range",
@@ -239,7 +239,9 @@ def download(config: AppConfig, **options: "Unpack[DownloadOptions]"):
# Initialize services
feedback.info("Initializing services...")
- api_client, provider, selector, media_registry, download_service = _initialize_services(config)
+ api_client, provider, selector, media_registry, download_service = (
+ _initialize_services(config)
+ )
feedback.info(f"Using provider: {provider.__class__.__name__}")
feedback.info(f"Using media API: {config.general.media_api}")
feedback.info(f"Translation type: {config.stream.translation_type}")
@@ -256,16 +258,22 @@ def download(config: AppConfig, **options: "Unpack[DownloadOptions]"):
# Process each selected anime
for selected_anime in selected_anime_list:
- feedback.info(f"Processing: {selected_anime.title.english or selected_anime.title.romaji}")
+ feedback.info(
+ f"Processing: {selected_anime.title.english or selected_anime.title.romaji}"
+ )
feedback.info(f"AniList ID: {selected_anime.id}")
-
+
# Get available episodes from provider
- episodes_result = _get_available_episodes(provider, selected_anime, config, feedback)
+ episodes_result = _get_available_episodes(
+ provider, selected_anime, config, feedback
+ )
if not episodes_result:
- feedback.warning(f"No episodes found for {selected_anime.title.english or selected_anime.title.romaji}")
+ feedback.warning(
+ f"No episodes found for {selected_anime.title.english or selected_anime.title.romaji}"
+ )
_suggest_alternatives(selected_anime, provider, config, feedback)
continue
-
+
# Unpack the result
if len(episodes_result) == 2:
available_episodes, provider_anime_data = episodes_result
@@ -282,32 +290,51 @@ def download(config: AppConfig, **options: "Unpack[DownloadOptions]"):
feedback.warning("No episodes selected for download")
continue
- feedback.info(f"About to download {len(episodes_to_download)} episodes: {', '.join(episodes_to_download)}")
-
+ feedback.info(
+ f"About to download {len(episodes_to_download)} episodes: {', '.join(episodes_to_download)}"
+ )
+
# Test stream availability before attempting download (using provider anime data)
if episodes_to_download and provider_anime_data:
test_episode = episodes_to_download[0]
- feedback.info(f"Testing stream availability for episode {test_episode}...")
- success = _test_episode_stream_availability(provider, provider_anime_data, test_episode, config, feedback)
+ feedback.info(
+ f"Testing stream availability for episode {test_episode}..."
+ )
+ success = _test_episode_stream_availability(
+ provider, provider_anime_data, test_episode, config, feedback
+ )
if not success:
feedback.warning(f"Stream test failed for episode {test_episode}.")
feedback.info("Possible solutions:")
feedback.info("1. Try a different provider (check your config)")
feedback.info("2. Check if the episode number is correct")
feedback.info("3. Try a different translation type (sub/dub)")
- feedback.info("4. The anime might not be available on this provider")
-
+ feedback.info(
+ "4. The anime might not be available on this provider"
+ )
+
# Ask user if they want to continue anyway
- continue_anyway = input("\nContinue with download anyway? (y/N): ").strip().lower()
- if continue_anyway not in ['y', 'yes']:
+ continue_anyway = (
+ input("\nContinue with download anyway? (y/N): ")
+ .strip()
+ .lower()
+ )
+ if continue_anyway not in ["y", "yes"]:
feedback.info("Download cancelled by user")
continue
# Download episodes (using provider anime data if available, otherwise AniList data)
- anime_for_download = provider_anime_data if provider_anime_data else selected_anime
+ anime_for_download = (
+ provider_anime_data if provider_anime_data else selected_anime
+ )
_download_episodes(
- download_service, anime_for_download, episodes_to_download,
- quality, force_redownload, max_concurrent, feedback
+ download_service,
+ anime_for_download,
+ episodes_to_download,
+ quality,
+ force_redownload,
+ max_concurrent,
+ feedback,
)
# Show final statistics
@@ -333,18 +360,36 @@ def _validate_options(options: "DownloadOptions") -> None:
end_date_lesser = options.get("end_date_lesser")
# Score validation
- if score_greater is not None and score_lesser is not None and score_greater > score_lesser:
+ if (
+ score_greater is not None
+ and score_lesser is not None
+ and score_greater > score_lesser
+ ):
raise FastAnimeError("Minimum score cannot be higher than maximum score")
-
+
# Popularity validation
- if popularity_greater is not None and popularity_lesser is not None and popularity_greater > popularity_lesser:
- raise FastAnimeError("Minimum popularity cannot be higher than maximum popularity")
-
+ if (
+ popularity_greater is not None
+ and popularity_lesser is not None
+ and popularity_greater > popularity_lesser
+ ):
+ raise FastAnimeError(
+ "Minimum popularity cannot be higher than maximum popularity"
+ )
+
# Date validation
- if start_date_greater is not None and start_date_lesser is not None and start_date_greater > start_date_lesser:
+ if (
+ start_date_greater is not None
+ and start_date_lesser is not None
+ and start_date_greater > start_date_lesser
+ ):
raise FastAnimeError("Minimum start date cannot be after maximum start date")
-
- if end_date_greater is not None and end_date_lesser is not None and end_date_greater > end_date_lesser:
+
+ if (
+ end_date_greater is not None
+ and end_date_lesser is not None
+ and end_date_greater > end_date_lesser
+ ):
raise FastAnimeError("Minimum end date cannot be after maximum end date")
@@ -353,27 +398,47 @@ def _initialize_services(config: AppConfig) -> tuple:
api_client = create_api_client(config.general.media_api, config)
provider = create_provider(config.general.provider)
selector = create_selector(config)
- media_registry = MediaRegistryService(config.general.media_api, config.media_registry)
+ media_registry = MediaRegistryService(
+ config.general.media_api, config.media_registry
+ )
download_service = DownloadService(config, media_registry, provider)
-
+
return api_client, provider, selector, media_registry, download_service
-def _build_search_params(options: "DownloadOptions", config: AppConfig) -> MediaSearchParams:
+def _build_search_params(
+ options: "DownloadOptions", config: AppConfig
+) -> MediaSearchParams:
"""Build MediaSearchParams from command options."""
return MediaSearchParams(
query=options.get("title"),
page=options.get("page", 1),
per_page=options.get("per_page") or config.anilist.per_page or 50,
sort=MediaSort(options.get("sort")) if options.get("sort") else None,
- status_in=[MediaStatus(s) for s in options.get("status", ())] if options.get("status") else None,
- status_not_in=[MediaStatus(s) for s in options.get("status_not", ())] if options.get("status_not") else None,
- genre_in=[MediaGenre(g) for g in options.get("genres", ())] if options.get("genres") else None,
- genre_not_in=[MediaGenre(g) for g in options.get("genres_not", ())] if options.get("genres_not") else None,
- tag_in=[MediaTag(t) for t in options.get("tags", ())] if options.get("tags") else None,
- tag_not_in=[MediaTag(t) for t in options.get("tags_not", ())] if options.get("tags_not") else None,
- format_in=[MediaFormat(f) for f in options.get("media_format", ())] if options.get("media_format") else None,
- type=MediaType(options.get("media_type")) if options.get("media_type") else None,
+ status_in=[MediaStatus(s) for s in options.get("status", ())]
+ if options.get("status")
+ else None,
+ status_not_in=[MediaStatus(s) for s in options.get("status_not", ())]
+ if options.get("status_not")
+ else None,
+ genre_in=[MediaGenre(g) for g in options.get("genres", ())]
+ if options.get("genres")
+ else None,
+ genre_not_in=[MediaGenre(g) for g in options.get("genres_not", ())]
+ if options.get("genres_not")
+ else None,
+ tag_in=[MediaTag(t) for t in options.get("tags", ())]
+ if options.get("tags")
+ else None,
+ tag_not_in=[MediaTag(t) for t in options.get("tags_not", ())]
+ if options.get("tags_not")
+ else None,
+ format_in=[MediaFormat(f) for f in options.get("media_format", ())]
+ if options.get("media_format")
+ else None,
+ type=MediaType(options.get("media_type"))
+ if options.get("media_type")
+ else None,
season=MediaSeason(options.get("season")) if options.get("season") else None,
seasonYear=int(year) if (year := options.get("year")) else None,
popularity_greater=options.get("popularity_greater"),
@@ -393,20 +458,24 @@ def _search_anime(api_client, search_params, feedback):
from rich.progress import Progress, SpinnerColumn, TextColumn
# Check if we have any search criteria at all
- has_criteria = any([
- search_params.query,
- search_params.genre_in,
- search_params.tag_in,
- search_params.status_in,
- search_params.season,
- search_params.seasonYear,
- search_params.format_in,
- search_params.popularity_greater,
- search_params.averageScore_greater,
- ])
-
+ has_criteria = any(
+ [
+ search_params.query,
+ search_params.genre_in,
+ search_params.tag_in,
+ search_params.status_in,
+ search_params.season,
+ search_params.seasonYear,
+ search_params.format_in,
+ search_params.popularity_greater,
+ search_params.averageScore_greater,
+ ]
+ )
+
if not has_criteria:
- raise FastAnimeError("Please provide at least one search criterion (title, genre, tag, status, etc.)")
+ raise FastAnimeError(
+ "Please provide at least one search criterion (title, genre, tag, status, etc.)"
+ )
with Progress(
SpinnerColumn(),
@@ -426,7 +495,9 @@ def _select_anime(search_result, selector, feedback):
"""Let user select anime from search results."""
if len(search_result.media) == 1:
selected_anime = search_result.media[0]
- feedback.info(f"Auto-selected: {selected_anime.title.english or selected_anime.title.romaji}")
+ feedback.info(
+ f"Auto-selected: {selected_anime.title.english or selected_anime.title.romaji}"
+ )
return [selected_anime]
# Create choice strings with additional info
@@ -467,27 +538,37 @@ def _get_available_episodes(provider, anime, config, feedback):
try:
# Search for anime in provider first
media_title = anime.title.english or anime.title.romaji
- feedback.info(f"Searching provider '{provider.__class__.__name__}' for: '{media_title}'")
+ feedback.info(
+ f"Searching provider '{provider.__class__.__name__}' for: '{media_title}'"
+ )
feedback.info(f"Using translation type: '{config.stream.translation_type}'")
-
+
provider_search_results = provider.search(
- SearchParams(query=media_title, translation_type=config.stream.translation_type)
+ SearchParams(
+ query=media_title, translation_type=config.stream.translation_type
+ )
)
if not provider_search_results or not provider_search_results.results:
- feedback.warning(f"Could not find '{media_title}' on provider '{provider.__class__.__name__}'")
+ feedback.warning(
+ f"Could not find '{media_title}' on provider '{provider.__class__.__name__}'"
+ )
return []
- feedback.info(f"Found {len(provider_search_results.results)} results on provider")
-
+ feedback.info(
+ f"Found {len(provider_search_results.results)} results on provider"
+ )
+
# Show the first few results for debugging
for i, result in enumerate(provider_search_results.results[:3]):
- feedback.info(f"Result {i+1}: ID={result.id}, Title='{getattr(result, 'title', 'Unknown')}'")
-
+ feedback.info(
+ f"Result {i + 1}: ID={result.id}, Title='{getattr(result, 'title', 'Unknown')}'"
+ )
+
# Get the first result (could be enhanced with fuzzy matching)
first_result = provider_search_results.results[0]
feedback.info(f"Using first result: ID={first_result.id}")
-
+
# Now get the full anime data using the PROVIDER'S ID, not AniList ID
provider_anime_data = provider.get(
AnimeParams(id=first_result.id, query=media_title)
@@ -498,10 +579,12 @@ def _get_available_episodes(provider, anime, config, feedback):
return []
# Check all available translation types
- translation_types = ['sub', 'dub']
+ translation_types = ["sub", "dub"]
for trans_type in translation_types:
episodes = getattr(provider_anime_data.episodes, trans_type, [])
- feedback.info(f"Translation '{trans_type}': {len(episodes)} episodes available")
+ feedback.info(
+ f"Translation '{trans_type}': {len(episodes)} episodes available"
+ )
available_episodes = getattr(
provider_anime_data.episodes, config.stream.translation_type, []
@@ -512,33 +595,46 @@ def _get_available_episodes(provider, anime, config, feedback):
# Suggest alternative translation type if available
for trans_type in translation_types:
if trans_type != config.stream.translation_type:
- other_episodes = getattr(provider_anime_data.episodes, trans_type, [])
+ other_episodes = getattr(
+ provider_anime_data.episodes, trans_type, []
+ )
if other_episodes:
- feedback.info(f"Suggestion: Try using translation type '{trans_type}' (has {len(other_episodes)} episodes)")
+ feedback.info(
+ f"Suggestion: Try using translation type '{trans_type}' (has {len(other_episodes)} episodes)"
+ )
return []
- feedback.info(f"Found {len(available_episodes)} episodes available for download")
-
+ feedback.info(
+ f"Found {len(available_episodes)} episodes available for download"
+ )
+
# Return both episodes and the provider anime data for later use
return available_episodes, provider_anime_data
except Exception as e:
feedback.error(f"Error getting episodes from provider: {e}")
import traceback
+
feedback.error("Full traceback", traceback.format_exc())
return []
-def _determine_episodes_to_download(episode_range, available_episodes, selector, feedback):
+def _determine_episodes_to_download(
+ episode_range, available_episodes, selector, feedback
+):
"""Determine which episodes to download based on range or user selection."""
if not available_episodes:
feedback.warning("No episodes available to download")
return []
-
+
if episode_range:
try:
- episodes_to_download = list(parse_episode_range(episode_range, available_episodes))
- feedback.info(f"Episodes from range '{episode_range}': {', '.join(episodes_to_download)}")
+ episodes_to_download = list(
+ parse_episode_range(episode_range, available_episodes)
+ )
+ feedback.info(
+ f"Episodes from range '{episode_range}': {', '.join(episodes_to_download)}"
+ )
return episodes_to_download
except (ValueError, IndexError) as e:
feedback.error(f"Invalid episode range '{episode_range}': {e}")
@@ -550,10 +646,10 @@ def _determine_episodes_to_download(episode_range, available_episodes, selector,
choices=available_episodes,
header="Use TAB to select multiple episodes, ENTER to confirm",
)
-
+
if selected_episodes:
feedback.info(f"Selected episodes: {', '.join(selected_episodes)}")
-
+
return selected_episodes
@@ -563,13 +659,17 @@ def _suggest_alternatives(anime, provider, config, feedback):
feedback.info(f"1. Current provider: {provider.__class__.__name__}")
feedback.info(f"2. AniList ID being used: {anime.id}")
feedback.info(f"3. Translation type: {config.stream.translation_type}")
-
+
# Special message for AllAnime provider
if provider.__class__.__name__ == "AllAnimeProvider":
- feedback.info("4. AllAnime ID mismatch: AllAnime uses different IDs than AniList")
+ feedback.info(
+ "4. AllAnime ID mismatch: AllAnime uses different IDs than AniList"
+ )
feedback.info(" The provider searches by title, but episodes use AniList ID")
- feedback.info(" This can cause episodes to not be found even if the anime exists")
-
+ feedback.info(
+ " This can cause episodes to not be found even if the anime exists"
+ )
+
# Check if provider has different ID mapping
anime_titles = []
if anime.title.english:
@@ -578,7 +678,7 @@ def _suggest_alternatives(anime, provider, config, feedback):
anime_titles.append(anime.title.romaji)
if anime.title.native:
anime_titles.append(anime.title.native)
-
+
feedback.info(f"5. Available titles: {', '.join(anime_titles)}")
feedback.info("6. Possible solutions:")
feedback.info(" - Try a different provider (GogoAnime, 9anime, etc.)")
@@ -588,7 +688,15 @@ def _suggest_alternatives(anime, provider, config, feedback):
feedback.info(" - Check if anime is available in your region")
-def _download_episodes(download_service, anime, episodes, quality, force_redownload, max_concurrent, feedback):
+def _download_episodes(
+ download_service,
+ anime,
+ episodes,
+ quality,
+ force_redownload,
+ max_concurrent,
+ feedback,
+):
"""Download the specified episodes."""
from concurrent.futures import ThreadPoolExecutor, as_completed
from rich.console import Console
@@ -607,18 +715,19 @@ def _download_episodes(download_service, anime, episodes, quality, force_redownl
anime_title = anime.title.english or anime.title.romaji
console.print(f"\n[bold green]Starting downloads for: {anime_title}[/bold green]")
-
+
# Set up logging capture to get download errors
log_messages = []
+
class ListHandler(logging.Handler):
def emit(self, record):
log_messages.append(self.format(record))
-
+
handler = ListHandler()
handler.setLevel(logging.ERROR)
- logger = logging.getLogger('fastanime')
+ logger = logging.getLogger("fastanime")
logger.addHandler(handler)
-
+
try:
with Progress(
SpinnerColumn(),
@@ -628,18 +737,19 @@ def _download_episodes(download_service, anime, episodes, quality, force_redownl
TaskProgressColumn(),
TimeElapsedColumn(),
) as progress:
-
task = progress.add_task("Downloading episodes...", total=len(episodes))
-
+
if max_concurrent == 1:
# Sequential downloads
results = {}
for episode in episodes:
- progress.update(task, description=f"Downloading episode {episode}...")
-
+ progress.update(
+ task, description=f"Downloading episode {episode}..."
+ )
+
# Clear previous log messages for this episode
log_messages.clear()
-
+
try:
success = download_service.download_episode(
media_item=anime,
@@ -648,19 +758,26 @@ def _download_episodes(download_service, anime, episodes, quality, force_redownl
force_redownload=force_redownload,
)
results[episode] = success
-
+
if not success:
# Try to get more detailed error from registry
- error_msg = _get_episode_error_details(download_service, anime, episode)
+ error_msg = _get_episode_error_details(
+ download_service, anime, episode
+ )
if error_msg:
feedback.error(f"Episode {episode}", error_msg)
elif log_messages:
# Show any log messages that were captured
- for msg in log_messages[-3:]: # Show last 3 error messages
+ for msg in log_messages[
+ -3:
+ ]: # Show last 3 error messages
feedback.error(f"Episode {episode}", msg)
else:
- feedback.error(f"Episode {episode}", "Download failed - check logs for details")
-
+ feedback.error(
+ f"Episode {episode}",
+ "Download failed - check logs for details",
+ )
+
except Exception as e:
results[episode] = False
feedback.error(f"Episode {episode} failed", str(e))
@@ -681,7 +798,7 @@ def _download_episodes(download_service, anime, episodes, quality, force_redownl
): episode
for episode in episodes
}
-
+
# Process completed downloads
for future in as_completed(future_to_episode):
episode = future_to_episode[future]
@@ -690,15 +807,22 @@ def _download_episodes(download_service, anime, episodes, quality, force_redownl
results[episode] = success
if not success:
# Try to get more detailed error from registry
- error_msg = _get_episode_error_details(download_service, anime, episode)
+ error_msg = _get_episode_error_details(
+ download_service, anime, episode
+ )
if error_msg:
feedback.error(f"Episode {episode}", error_msg)
else:
- feedback.error(f"Episode {episode}", "Download failed - check logs for details")
+ feedback.error(
+ f"Episode {episode}",
+ "Download failed - check logs for details",
+ )
except Exception as e:
results[episode] = False
- feedback.error(f"Download failed for episode {episode}", str(e))
-
+ feedback.error(
+ f"Download failed for episode {episode}", str(e)
+ )
+
progress.advance(task)
finally:
# Remove the log handler
@@ -715,13 +839,13 @@ def _get_episode_error_details(download_service, anime, episode_number):
media_record = download_service.media_registry.get_record(anime.id)
if not media_record:
return None
-
+
# Find the episode in the record
for episode_record in media_record.episodes:
if episode_record.episode_number == episode_number:
if episode_record.error_message:
error_msg = episode_record.error_message
-
+
# Provide more helpful error messages for common issues
if "Failed to get server for episode" in error_msg:
return f"Episode {episode_number} not available on current provider. Try a different provider or check episode number."
@@ -732,20 +856,24 @@ def _get_episode_error_details(download_service, anime, episode_number):
elif episode_record.download_status:
return f"Download status: {episode_record.download_status.value}"
break
-
+
return None
except Exception:
return None
-def _test_episode_stream_availability(provider, anime, episode_number, config, feedback):
+def _test_episode_stream_availability(
+ provider, anime, episode_number, config, feedback
+):
"""Test if streams are available for a specific episode."""
try:
from .....libs.provider.anime.params import EpisodeStreamsParams
-
+
media_title = anime.title.english or anime.title.romaji
- feedback.info(f"Testing stream availability for '{media_title}' episode {episode_number}")
-
+ feedback.info(
+ f"Testing stream availability for '{media_title}' episode {episode_number}"
+ )
+
# Test episode streams
streams = provider.episode_streams(
EpisodeStreamsParams(
@@ -755,29 +883,39 @@ def _test_episode_stream_availability(provider, anime, episode_number, config, f
translation_type=config.stream.translation_type,
)
)
-
+
if not streams:
feedback.warning(f"No streams found for episode {episode_number}")
return False
-
+
# Convert to list to check actual availability
stream_list = list(streams)
if not stream_list:
- feedback.warning(f"No stream servers available for episode {episode_number}")
+ feedback.warning(
+ f"No stream servers available for episode {episode_number}"
+ )
return False
-
- feedback.info(f"Found {len(stream_list)} stream server(s) for episode {episode_number}")
-
+
+ feedback.info(
+ f"Found {len(stream_list)} stream server(s) for episode {episode_number}"
+ )
+
# Show details about the first server for debugging
first_server = stream_list[0]
- feedback.info(f"First server: name='{first_server.name}', type='{type(first_server).__name__}'")
-
+ feedback.info(
+ f"First server: name='{first_server.name}', type='{type(first_server).__name__}'"
+ )
+
return True
-
+
except TypeError as e:
if "'NoneType' object is not subscriptable" in str(e):
- feedback.warning(f"Episode {episode_number} not available on provider (API returned null)")
- feedback.info("This usually means the episode doesn't exist on this provider or isn't accessible")
+ feedback.warning(
+ f"Episode {episode_number} not available on provider (API returned null)"
+ )
+ feedback.info(
+ "This usually means the episode doesn't exist on this provider or isn't accessible"
+ )
return False
else:
feedback.error(f"Type error testing stream availability: {e}")
@@ -785,6 +923,7 @@ def _test_episode_stream_availability(provider, anime, episode_number, config, f
except Exception as e:
feedback.error(f"Error testing stream availability: {e}")
import traceback
+
feedback.error("Stream test traceback", traceback.format_exc())
return False
@@ -793,25 +932,31 @@ def _display_download_results(console, results: dict[str, bool], anime):
"""Display download results in a formatted table."""
from rich.table import Table
- table = Table(title=f"Download Results for {anime.title.english or anime.title.romaji}")
+ table = Table(
+ title=f"Download Results for {anime.title.english or anime.title.romaji}"
+ )
table.add_column("Episode", justify="center", style="cyan")
table.add_column("Status", justify="center")
-
+
for episode, success in sorted(results.items(), key=lambda x: float(x[0])):
status = "[green]โ Success[/green]" if success else "[red]โ Failed[/red]"
table.add_row(episode, status)
-
+
console.print(table)
-
+
# Summary
total = len(results)
successful = sum(results.values())
failed = total - successful
-
+
if failed == 0:
- console.print(f"\n[bold green]All {total} episodes downloaded successfully![/bold green]")
+ console.print(
+ f"\n[bold green]All {total} episodes downloaded successfully![/bold green]"
+ )
else:
- console.print(f"\n[yellow]Download complete: {successful}/{total} successful, {failed} failed[/yellow]")
+ console.print(
+ f"\n[yellow]Download complete: {successful}/{total} successful, {failed} failed[/yellow]"
+ )
def _show_final_statistics(download_service, feedback):
@@ -820,17 +965,17 @@ def _show_final_statistics(download_service, feedback):
console = Console()
stats = download_service.get_download_statistics()
-
+
if stats:
console.print(f"\n[bold blue]Overall Download Statistics:[/bold blue]")
console.print(f"Total episodes tracked: {stats.get('total_episodes', 0)}")
console.print(f"Successfully downloaded: {stats.get('downloaded', 0)}")
console.print(f"Failed downloads: {stats.get('failed', 0)}")
console.print(f"Queued downloads: {stats.get('queued', 0)}")
-
- if stats.get('total_size_bytes', 0) > 0:
- size_mb = stats['total_size_bytes'] / (1024 * 1024)
+
+ if stats.get("total_size_bytes", 0) > 0:
+ size_mb = stats["total_size_bytes"] / (1024 * 1024)
if size_mb > 1024:
- console.print(f"Total size: {size_mb/1024:.2f} GB")
+ console.print(f"Total size: {size_mb / 1024:.2f} GB")
else:
console.print(f"Total size: {size_mb:.2f} MB")
diff --git a/fastanime/cli/commands/anilist/commands/search.py b/fastanime/cli/commands/anilist/commands/search.py
index 17bf74d..175c2fe 100644
--- a/fastanime/cli/commands/anilist/commands/search.py
+++ b/fastanime/cli/commands/anilist/commands/search.py
@@ -229,17 +229,39 @@ def search(config: AppConfig, **options: "Unpack[SearchOptions]"):
on_list = options.get("on_list")
# Validate logical relationships
- if score_greater is not None and score_lesser is not None and score_greater > score_lesser:
+ if (
+ score_greater is not None
+ and score_lesser is not None
+ and score_greater > score_lesser
+ ):
raise FastAnimeError("Minimum score cannot be higher than maximum score")
-
- if popularity_greater is not None and popularity_lesser is not None and popularity_greater > popularity_lesser:
- raise FastAnimeError("Minimum popularity cannot be higher than maximum popularity")
-
- if start_date_greater is not None and start_date_lesser is not None and start_date_greater > start_date_lesser:
- raise FastAnimeError("Start date greater cannot be later than start date lesser")
-
- if end_date_greater is not None and end_date_lesser is not None and end_date_greater > end_date_lesser:
- raise FastAnimeError("End date greater cannot be later than end date lesser")
+
+ if (
+ popularity_greater is not None
+ and popularity_lesser is not None
+ and popularity_greater > popularity_lesser
+ ):
+ raise FastAnimeError(
+ "Minimum popularity cannot be higher than maximum popularity"
+ )
+
+ if (
+ start_date_greater is not None
+ and start_date_lesser is not None
+ and start_date_greater > start_date_lesser
+ ):
+ raise FastAnimeError(
+ "Start date greater cannot be later than start date lesser"
+ )
+
+ if (
+ end_date_greater is not None
+ and end_date_lesser is not None
+ and end_date_greater > end_date_lesser
+ ):
+ raise FastAnimeError(
+ "End date greater cannot be later than end date lesser"
+ )
# Build search parameters
search_params = MediaSearchParams(
@@ -287,7 +309,7 @@ def search(config: AppConfig, **options: "Unpack[SearchOptions]"):
feedback.info(
f"Found {len(search_result.media)} anime matching your search. Launching interactive mode..."
)
-
+
# Create initial state with search results
initial_state = State(
menu_name=MenuName.RESULTS,
@@ -299,7 +321,7 @@ def search(config: AppConfig, **options: "Unpack[SearchOptions]"):
page_info=search_result.page_info,
),
)
-
+
session.load_menus_from_folder("media")
session.run(config, history=[initial_state])
diff --git a/fastanime/cli/commands/anilist/commands/stats.py b/fastanime/cli/commands/anilist/commands/stats.py
index 04bbb21..548c306 100644
--- a/fastanime/cli/commands/anilist/commands/stats.py
+++ b/fastanime/cli/commands/anilist/commands/stats.py
@@ -40,19 +40,19 @@ def stats(config: "AppConfig"):
"Authentication Required",
f"You must be logged in to {config.general.media_api} to sync your media list.",
)
- feedback.info("Run this command to authenticate:", f"fastanime {config.general.media_api} auth")
+ feedback.info(
+ "Run this command to authenticate:",
+ f"fastanime {config.general.media_api} auth",
+ )
raise click.Abort()
-
-
-
# Check if kitten is available for image display
KITTEN_EXECUTABLE = shutil.which("kitten")
if not KITTEN_EXECUTABLE:
feedback.warning("Kitten not found - profile image will not be displayed")
else:
# Display profile image using kitten icat
- if user_profile.avatar_url:
+ if profile.user_profile.avatar_url:
console.clear()
image_x = int(console.size.width * 0.1)
image_y = int(console.size.height * 0.1)
@@ -66,7 +66,7 @@ def stats(config: "AppConfig"):
"--clear",
"--place",
f"{img_w}x{img_h}@{image_x}x{image_y}",
- user_profile.avatar_url,
+ profile.user_profile.avatar_url,
],
check=False,
)
@@ -75,21 +75,16 @@ def stats(config: "AppConfig"):
feedback.warning("Failed to display profile image")
# Display user information
- about_text = getattr(user_profile, "about", "") or "No description available"
+ about_text = (
+ getattr(profile.user_profile, "about", "") or "No description available"
+ )
console.print(
Panel(
Markdown(about_text),
- title=f"๐ {user_profile.name}'s Profile",
+ title=f"๐ {profile.user_profile.name}'s Profile",
)
)
# You can add more stats here if the API provides them
feedback.success("User profile displayed successfully")
-
- except FastAnimeError as e:
- feedback.error("Failed to fetch user stats", str(e))
- raise click.Abort()
- except Exception as e:
- feedback.error("Unexpected error occurred", str(e))
- raise click.Abort()
diff --git a/fastanime/cli/commands/download.py b/fastanime/cli/commands/download.py
index b782c4e..d2bb92b 100644
--- a/fastanime/cli/commands/download.py
+++ b/fastanime/cli/commands/download.py
@@ -150,20 +150,19 @@ def download(config: AppConfig, **options: "Unpack[Options]"):
if not anime:
raise FastAnimeError(f"Failed to fetch anime {anime_result.title}")
-
+
available_episodes: list[str] = sorted(
getattr(anime.episodes, config.stream.translation_type), key=float
)
-
+
if options["episode_range"]:
from ..utils.parser import parse_episode_range
-
+
try:
episodes_range = parse_episode_range(
- options["episode_range"],
- available_episodes
+ options["episode_range"], available_episodes
)
-
+
for episode in episodes_range:
download_anime(
config, options, provider, selector, anime, anime_title, episode
diff --git a/fastanime/cli/commands/registry/commands/backup.py b/fastanime/cli/commands/registry/commands/backup.py
index 9b18798..2c8bd3c 100644
--- a/fastanime/cli/commands/registry/commands/backup.py
+++ b/fastanime/cli/commands/registry/commands/backup.py
@@ -19,31 +19,22 @@ from ....utils.feedback import create_feedback_manager
"--output",
"-o",
type=click.Path(),
- help="Output backup file path (auto-generated if not specified)"
-)
-@click.option(
- "--compress",
- "-c",
- is_flag=True,
- help="Compress the backup archive"
-)
-@click.option(
- "--include-cache",
- is_flag=True,
- help="Include cache files in backup"
+ help="Output backup file path (auto-generated if not specified)",
)
+@click.option("--compress", "-c", is_flag=True, help="Compress the backup archive")
+@click.option("--include-cache", is_flag=True, help="Include cache files in backup")
@click.option(
"--format",
"backup_format",
type=click.Choice(["tar", "zip"], case_sensitive=False),
default="tar",
- help="Backup archive format"
+ help="Backup archive format",
)
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to backup"
+ help="Media API registry to backup",
)
@click.pass_obj
def backup(
@@ -52,35 +43,37 @@ def backup(
compress: bool,
include_cache: bool,
backup_format: str,
- api: str
+ api: str,
):
"""
Create a complete backup of your media registry.
-
+
Includes all media records, index files, and optionally cache data.
Backups can be compressed and are suitable for restoration.
"""
feedback = create_feedback_manager(config.general.icons)
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
-
+
# Generate output filename if not specified
if not output:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- extension = "tar.gz" if compress and backup_format == "tar" else backup_format
+ extension = (
+ "tar.gz" if compress and backup_format == "tar" else backup_format
+ )
if backup_format == "zip":
extension = "zip"
output = f"fastanime_registry_backup_{api}_{timestamp}.{extension}"
-
+
output_path = Path(output)
-
+
# Get backup statistics before starting
stats = registry_service.get_registry_stats()
- total_media = stats.get('total_media', 0)
-
+ total_media = stats.get("total_media", 0)
+
feedback.info("Starting Backup", f"Backing up {total_media} media entries...")
-
+
# Create backup based on format
if backup_format.lower() == "tar":
_create_tar_backup(
@@ -90,101 +83,111 @@ def backup(
_create_zip_backup(
registry_service, output_path, include_cache, feedback, api
)
-
+
# Get final backup size
backup_size = _format_file_size(output_path)
-
+
feedback.success(
- "Backup Complete",
- f"Registry backed up to {output_path} ({backup_size})"
+ "Backup Complete", f"Registry backed up to {output_path} ({backup_size})"
)
-
+
# Show backup contents summary
_show_backup_summary(output_path, backup_format, feedback)
-
+
except Exception as e:
feedback.error("Backup Error", f"Failed to create backup: {e}")
raise click.Abort()
-def _create_tar_backup(registry_service, output_path: Path, compress: bool, include_cache: bool, feedback, api: str):
+def _create_tar_backup(
+ registry_service,
+ output_path: Path,
+ compress: bool,
+ include_cache: bool,
+ feedback,
+ api: str,
+):
"""Create a tar-based backup."""
-
+
mode = "w:gz" if compress else "w"
-
+
with tarfile.open(output_path, mode) as tar:
# Add registry directory
registry_dir = registry_service.config.media_dir / api
if registry_dir.exists():
tar.add(registry_dir, arcname=f"registry/{api}")
feedback.info("Added to backup", f"Registry data ({api})")
-
+
# Add index directory
index_dir = registry_service.config.index_dir
if index_dir.exists():
tar.add(index_dir, arcname="index")
feedback.info("Added to backup", "Registry index")
-
+
# Add cache if requested
if include_cache:
cache_dir = registry_service.config.media_dir.parent / "cache"
if cache_dir.exists():
tar.add(cache_dir, arcname="cache")
feedback.info("Added to backup", "Cache data")
-
+
# Add metadata file
metadata = _create_backup_metadata(registry_service, api, include_cache)
metadata_path = output_path.parent / "backup_metadata.json"
-
+
try:
import json
- with open(metadata_path, 'w', encoding='utf-8') as f:
+
+ with open(metadata_path, "w", encoding="utf-8") as f:
json.dump(metadata, f, indent=2, default=str)
-
+
tar.add(metadata_path, arcname="backup_metadata.json")
metadata_path.unlink() # Clean up temp file
-
+
except Exception as e:
feedback.warning("Metadata Error", f"Failed to add metadata: {e}")
-def _create_zip_backup(registry_service, output_path: Path, include_cache: bool, feedback, api: str):
+def _create_zip_backup(
+ registry_service, output_path: Path, include_cache: bool, feedback, api: str
+):
"""Create a zip-based backup."""
import zipfile
-
- with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
+
+ with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zip_file:
# Add registry directory
registry_dir = registry_service.config.media_dir / api
if registry_dir.exists():
- for file_path in registry_dir.rglob('*'):
+ for file_path in registry_dir.rglob("*"):
if file_path.is_file():
arcname = f"registry/{api}/{file_path.relative_to(registry_dir)}"
zip_file.write(file_path, arcname)
feedback.info("Added to backup", f"Registry data ({api})")
-
+
# Add index directory
index_dir = registry_service.config.index_dir
if index_dir.exists():
- for file_path in index_dir.rglob('*'):
+ for file_path in index_dir.rglob("*"):
if file_path.is_file():
arcname = f"index/{file_path.relative_to(index_dir)}"
zip_file.write(file_path, arcname)
feedback.info("Added to backup", "Registry index")
-
+
# Add cache if requested
if include_cache:
cache_dir = registry_service.config.media_dir.parent / "cache"
if cache_dir.exists():
- for file_path in cache_dir.rglob('*'):
+ for file_path in cache_dir.rglob("*"):
if file_path.is_file():
arcname = f"cache/{file_path.relative_to(cache_dir)}"
zip_file.write(file_path, arcname)
feedback.info("Added to backup", "Cache data")
-
+
# Add metadata
metadata = _create_backup_metadata(registry_service, api, include_cache)
try:
import json
+
metadata_json = json.dumps(metadata, indent=2, default=str)
zip_file.writestr("backup_metadata.json", metadata_json)
except Exception as e:
@@ -194,13 +197,13 @@ def _create_zip_backup(registry_service, output_path: Path, include_cache: bool,
def _create_backup_metadata(registry_service, api: str, include_cache: bool) -> dict:
"""Create backup metadata."""
stats = registry_service.get_registry_stats()
-
+
return {
"backup_timestamp": datetime.now().isoformat(),
"fastanime_version": "unknown", # You might want to get this from somewhere
- "registry_version": stats.get('version'),
+ "registry_version": stats.get("version"),
"api": api,
- "total_media": stats.get('total_media', 0),
+ "total_media": stats.get("total_media", 0),
"include_cache": include_cache,
"registry_stats": stats,
"backup_type": "full",
@@ -209,22 +212,23 @@ def _create_backup_metadata(registry_service, api: str, include_cache: bool) ->
def _show_backup_summary(backup_path: Path, format_type: str, feedback):
"""Show summary of backup contents."""
-
+
try:
if format_type.lower() == "tar":
- with tarfile.open(backup_path, 'r:*') as tar:
+ with tarfile.open(backup_path, "r:*") as tar:
members = tar.getmembers()
file_count = len([m for m in members if m.isfile()])
dir_count = len([m for m in members if m.isdir()])
else: # zip
import zipfile
- with zipfile.ZipFile(backup_path, 'r') as zip_file:
+
+ with zipfile.ZipFile(backup_path, "r") as zip_file:
info_list = zip_file.infolist()
file_count = len([info for info in info_list if not info.is_dir()])
dir_count = len([info for info in info_list if info.is_dir()])
-
+
feedback.info("Backup Contents", f"{file_count} files, {dir_count} directories")
-
+
except Exception as e:
feedback.warning("Summary Error", f"Could not analyze backup contents: {e}")
@@ -233,7 +237,7 @@ def _format_file_size(file_path: Path) -> str:
"""Format file size in human-readable format."""
try:
size = file_path.stat().st_size
- for unit in ['B', 'KB', 'MB', 'GB']:
+ for unit in ["B", "KB", "MB", "GB"]:
if size < 1024.0:
return f"{size:.1f} {unit}"
size /= 1024.0
diff --git a/fastanime/cli/commands/registry/commands/clean.py b/fastanime/cli/commands/registry/commands/clean.py
index f866dc9..3552b19 100644
--- a/fastanime/cli/commands/registry/commands/clean.py
+++ b/fastanime/cli/commands/registry/commands/clean.py
@@ -13,41 +13,26 @@ from ....utils.feedback import create_feedback_manager
@click.command(help="Clean up orphaned entries and invalid data from registry")
@click.option(
- "--dry-run",
- is_flag=True,
- help="Show what would be cleaned without making changes"
+ "--dry-run", is_flag=True, help="Show what would be cleaned without making changes"
)
@click.option(
"--orphaned",
is_flag=True,
- help="Remove orphaned media records (index entries without files)"
+ help="Remove orphaned media records (index entries without files)",
+)
+@click.option("--invalid", is_flag=True, help="Remove invalid or corrupted entries")
+@click.option("--duplicates", is_flag=True, help="Remove duplicate entries")
+@click.option(
+ "--old-format", is_flag=True, help="Clean entries from old registry format versions"
)
@click.option(
- "--invalid",
- is_flag=True,
- help="Remove invalid or corrupted entries"
-)
-@click.option(
- "--duplicates",
- is_flag=True,
- help="Remove duplicate entries"
-)
-@click.option(
- "--old-format",
- is_flag=True,
- help="Clean entries from old registry format versions"
-)
-@click.option(
- "--force",
- "-f",
- is_flag=True,
- help="Force cleanup without confirmation prompts"
+ "--force", "-f", is_flag=True, help="Force cleanup without confirmation prompts"
)
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to clean"
+ help="Media API registry to clean",
)
@click.pass_obj
def clean(
@@ -58,73 +43,86 @@ def clean(
duplicates: bool,
old_format: bool,
force: bool,
- api: str
+ api: str,
):
"""
Clean up your local media registry.
-
+
Can remove orphaned entries, invalid data, duplicates, and entries
from old format versions. Use --dry-run to preview changes.
"""
feedback = create_feedback_manager(config.general.icons)
console = Console()
-
+
# Default to all cleanup types if none specified
if not any([orphaned, invalid, duplicates, old_format]):
orphaned = invalid = duplicates = old_format = True
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
-
+
cleanup_results = {
"orphaned": [],
"invalid": [],
"duplicates": [],
- "old_format": []
+ "old_format": [],
}
-
+
# Analyze registry for cleanup opportunities
- _analyze_registry(registry_service, cleanup_results, orphaned, invalid, duplicates, old_format)
-
+ _analyze_registry(
+ registry_service, cleanup_results, orphaned, invalid, duplicates, old_format
+ )
+
# Show cleanup summary
_display_cleanup_summary(console, cleanup_results, config.general.icons)
-
+
# Confirm cleanup if not dry run and not forced
total_items = sum(len(items) for items in cleanup_results.values())
if total_items == 0:
- feedback.info("Registry Clean", "No cleanup needed - registry is already clean!")
+ feedback.info(
+ "Registry Clean", "No cleanup needed - registry is already clean!"
+ )
return
-
+
if not dry_run:
if not force:
if not click.confirm(f"Clean up {total_items} items from registry?"):
feedback.info("Cleanup Cancelled", "No changes were made")
return
-
+
# Perform cleanup
_perform_cleanup(registry_service, cleanup_results, feedback)
-
- feedback.success("Cleanup Complete", f"Cleaned up {total_items} items from registry")
+
+ feedback.success(
+ "Cleanup Complete", f"Cleaned up {total_items} items from registry"
+ )
else:
feedback.info("Dry Run Complete", f"Would clean up {total_items} items")
-
+
except Exception as e:
feedback.error("Cleanup Error", f"Failed to clean registry: {e}")
raise click.Abort()
-def _analyze_registry(registry_service, results: dict, check_orphaned: bool, check_invalid: bool, check_duplicates: bool, check_old_format: bool):
+def _analyze_registry(
+ registry_service,
+ results: dict,
+ check_orphaned: bool,
+ check_invalid: bool,
+ check_duplicates: bool,
+ check_old_format: bool,
+):
"""Analyze registry for cleanup opportunities."""
-
+
if check_orphaned:
results["orphaned"] = _find_orphaned_entries(registry_service)
-
+
if check_invalid:
results["invalid"] = _find_invalid_entries(registry_service)
-
+
if check_duplicates:
results["duplicates"] = _find_duplicate_entries(registry_service)
-
+
if check_old_format:
results["old_format"] = _find_old_format_entries(registry_service)
@@ -132,65 +130,77 @@ def _analyze_registry(registry_service, results: dict, check_orphaned: bool, che
def _find_orphaned_entries(registry_service) -> list:
"""Find index entries that don't have corresponding media files."""
orphaned = []
-
+
try:
index = registry_service._load_index()
-
+
for entry_key, entry in index.media_index.items():
media_file = registry_service._get_media_file_path(entry.media_id)
if not media_file.exists():
- orphaned.append({
- "type": "orphaned_index",
- "id": entry.media_id,
- "key": entry_key,
- "reason": "Media file missing"
- })
+ orphaned.append(
+ {
+ "type": "orphaned_index",
+ "id": entry.media_id,
+ "key": entry_key,
+ "reason": "Media file missing",
+ }
+ )
except Exception:
pass
-
+
return orphaned
def _find_invalid_entries(registry_service) -> list:
"""Find invalid or corrupted entries."""
invalid = []
-
+
try:
# Check all media files
for media_file in registry_service.media_registry_dir.iterdir():
- if not media_file.name.endswith('.json'):
+ if not media_file.name.endswith(".json"):
continue
-
+
try:
media_id = int(media_file.stem)
record = registry_service.get_media_record(media_id)
-
+
# Check for invalid record structure
if not record or not record.media_item:
- invalid.append({
- "type": "invalid_record",
- "id": media_id,
- "file": media_file,
- "reason": "Invalid record structure"
- })
- elif not record.media_item.title or not record.media_item.title.english and not record.media_item.title.romaji:
- invalid.append({
- "type": "invalid_title",
- "id": media_id,
- "file": media_file,
- "reason": "Missing or invalid title"
- })
-
+ invalid.append(
+ {
+ "type": "invalid_record",
+ "id": media_id,
+ "file": media_file,
+ "reason": "Invalid record structure",
+ }
+ )
+ elif (
+ not record.media_item.title
+ or not record.media_item.title.english
+ and not record.media_item.title.romaji
+ ):
+ invalid.append(
+ {
+ "type": "invalid_title",
+ "id": media_id,
+ "file": media_file,
+ "reason": "Missing or invalid title",
+ }
+ )
+
except (ValueError, Exception) as e:
- invalid.append({
- "type": "corrupted_file",
- "id": media_file.stem,
- "file": media_file,
- "reason": f"File corruption: {e}"
- })
+ invalid.append(
+ {
+ "type": "corrupted_file",
+ "id": media_file.stem,
+ "file": media_file,
+ "reason": f"File corruption: {e}",
+ }
+ )
except Exception:
pass
-
+
return invalid
@@ -198,76 +208,81 @@ def _find_duplicate_entries(registry_service) -> list:
"""Find duplicate entries (same media ID appearing multiple times)."""
duplicates = []
seen_ids = set()
-
+
try:
index = registry_service._load_index()
-
+
for entry_key, entry in index.media_index.items():
if entry.media_id in seen_ids:
- duplicates.append({
- "type": "duplicate_index",
- "id": entry.media_id,
- "key": entry_key,
- "reason": "Duplicate media ID in index"
- })
+ duplicates.append(
+ {
+ "type": "duplicate_index",
+ "id": entry.media_id,
+ "key": entry_key,
+ "reason": "Duplicate media ID in index",
+ }
+ )
else:
seen_ids.add(entry.media_id)
except Exception:
pass
-
+
return duplicates
def _find_old_format_entries(registry_service) -> list:
"""Find entries from old registry format versions."""
old_format = []
-
+
try:
index = registry_service._load_index()
current_version = registry_service._index.version
-
+
# Check for entries that might be from old formats
# This is a placeholder - you'd implement specific checks based on your version history
for media_file in registry_service.media_registry_dir.iterdir():
- if not media_file.name.endswith('.json'):
+ if not media_file.name.endswith(".json"):
continue
-
+
try:
import json
- with open(media_file, 'r') as f:
+
+ with open(media_file, "r") as f:
data = json.load(f)
-
+
# Check for old format indicators
- if 'version' in data and data['version'] < current_version:
- old_format.append({
- "type": "old_version",
- "id": media_file.stem,
- "file": media_file,
- "reason": f"Old format version {data.get('version')}"
- })
+ if "version" in data and data["version"] < current_version:
+ old_format.append(
+ {
+ "type": "old_version",
+ "id": media_file.stem,
+ "file": media_file,
+ "reason": f"Old format version {data.get('version')}",
+ }
+ )
except Exception:
pass
except Exception:
pass
-
+
return old_format
def _display_cleanup_summary(console: Console, results: dict, icons: bool):
"""Display summary of cleanup opportunities."""
-
+
table = Table(title=f"{'๐งน ' if icons else ''}Registry Cleanup Summary")
table.add_column("Category", style="cyan", no_wrap=True)
table.add_column("Count", style="magenta", justify="right")
table.add_column("Description", style="white")
-
+
categories = {
"orphaned": "Orphaned Entries",
- "invalid": "Invalid Entries",
+ "invalid": "Invalid Entries",
"duplicates": "Duplicate Entries",
- "old_format": "Old Format Entries"
+ "old_format": "Old Format Entries",
}
-
+
for category, display_name in categories.items():
count = len(results[category])
if count > 0:
@@ -278,52 +293,50 @@ def _display_cleanup_summary(console: Console, results: dict, icons: bool):
description += "..."
else:
description = "None found"
-
+
table.add_row(display_name, str(count), description)
-
+
console.print(table)
console.print()
-
+
# Show detailed breakdown if there are items to clean
for category, items in results.items():
if items:
_display_category_details(console, category, items, icons)
-def _display_category_details(console: Console, category: str, items: list, icons: bool):
+def _display_category_details(
+ console: Console, category: str, items: list, icons: bool
+):
"""Display detailed breakdown for a cleanup category."""
-
+
category_names = {
"orphaned": "๐ Orphaned Entries" if icons else "Orphaned Entries",
"invalid": "โ Invalid Entries" if icons else "Invalid Entries",
- "duplicates": "๐ฅ Duplicate Entries" if icons else "Duplicate Entries",
- "old_format": "๐ผ Old Format Entries" if icons else "Old Format Entries"
+ "duplicates": "๐ฅ Duplicate Entries" if icons else "Duplicate Entries",
+ "old_format": "๐ผ Old Format Entries" if icons else "Old Format Entries",
}
-
+
table = Table(title=category_names.get(category, category.title()))
table.add_column("ID", style="cyan", no_wrap=True)
table.add_column("Type", style="magenta")
table.add_column("Reason", style="yellow")
-
+
for item in items[:10]: # Show max 10 items
- table.add_row(
- str(item["id"]),
- item["type"],
- item["reason"]
- )
-
+ table.add_row(str(item["id"]), item["type"], item["reason"])
+
if len(items) > 10:
table.add_row("...", "...", f"And {len(items) - 10} more")
-
+
console.print(table)
console.print()
def _perform_cleanup(registry_service, results: dict, feedback):
"""Perform the actual cleanup operations."""
-
+
cleaned_count = 0
-
+
# Clean orphaned entries
for item in results["orphaned"]:
try:
@@ -334,25 +347,29 @@ def _perform_cleanup(registry_service, results: dict, feedback):
registry_service._save_index(index)
cleaned_count += 1
except Exception as e:
- feedback.warning("Cleanup Error", f"Failed to clean orphaned entry {item['id']}: {e}")
-
+ feedback.warning(
+ "Cleanup Error", f"Failed to clean orphaned entry {item['id']}: {e}"
+ )
+
# Clean invalid entries
for item in results["invalid"]:
try:
if "file" in item:
item["file"].unlink() # Delete the file
cleaned_count += 1
-
+
# Also remove from index if present
index = registry_service._load_index()
entry_key = f"{registry_service._media_api}_{item['id']}"
if entry_key in index.media_index:
del index.media_index[entry_key]
registry_service._save_index(index)
-
+
except Exception as e:
- feedback.warning("Cleanup Error", f"Failed to clean invalid entry {item['id']}: {e}")
-
+ feedback.warning(
+ "Cleanup Error", f"Failed to clean invalid entry {item['id']}: {e}"
+ )
+
# Clean duplicates
for item in results["duplicates"]:
try:
@@ -363,8 +380,10 @@ def _perform_cleanup(registry_service, results: dict, feedback):
registry_service._save_index(index)
cleaned_count += 1
except Exception as e:
- feedback.warning("Cleanup Error", f"Failed to clean duplicate entry {item['id']}: {e}")
-
+ feedback.warning(
+ "Cleanup Error", f"Failed to clean duplicate entry {item['id']}: {e}"
+ )
+
# Clean old format entries
for item in results["old_format"]:
try:
@@ -374,6 +393,8 @@ def _perform_cleanup(registry_service, results: dict, feedback):
item["file"].unlink()
cleaned_count += 1
except Exception as e:
- feedback.warning("Cleanup Error", f"Failed to clean old format entry {item['id']}: {e}")
-
+ feedback.warning(
+ "Cleanup Error", f"Failed to clean old format entry {item['id']}: {e}"
+ )
+
feedback.info("Cleanup Results", f"Successfully cleaned {cleaned_count} items")
diff --git a/fastanime/cli/commands/registry/commands/export.py b/fastanime/cli/commands/registry/commands/export.py
index 87eafda..eb40641 100644
--- a/fastanime/cli/commands/registry/commands/export.py
+++ b/fastanime/cli/commands/registry/commands/export.py
@@ -20,37 +20,32 @@ from ....utils.feedback import create_feedback_manager
"output_format",
type=click.Choice(["json", "csv", "xml"], case_sensitive=False),
default="json",
- help="Export format"
+ help="Export format",
)
@click.option(
"--output",
"-o",
type=click.Path(),
- help="Output file path (auto-generated if not specified)"
+ help="Output file path (auto-generated if not specified)",
)
@click.option(
- "--include-metadata",
- is_flag=True,
- help="Include detailed media metadata in export"
+ "--include-metadata", is_flag=True, help="Include detailed media metadata in export"
)
@click.option(
"--status",
multiple=True,
- type=click.Choice([
- "watching", "completed", "planning", "dropped", "paused", "repeating"
- ], case_sensitive=False),
- help="Only export specific status lists"
-)
-@click.option(
- "--compress",
- is_flag=True,
- help="Compress the output file"
+ type=click.Choice(
+ ["watching", "completed", "planning", "dropped", "paused", "repeating"],
+ case_sensitive=False,
+ ),
+ help="Only export specific status lists",
)
+@click.option("--compress", is_flag=True, help="Compress the output file")
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to export"
+ help="Media API registry to export",
)
@click.pass_obj
def export(
@@ -60,19 +55,19 @@ def export(
include_metadata: bool,
status: tuple[str, ...],
compress: bool,
- api: str
+ api: str,
):
"""
Export your local media registry to various formats.
-
+
Supports JSON, CSV, and XML formats. Can optionally include
detailed metadata and compress the output.
"""
feedback = create_feedback_manager(config.general.icons)
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
-
+
# Generate output filename if not specified
if not output:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -80,14 +75,12 @@ def export(
if compress:
extension += ".gz"
output = f"fastanime_registry_{api}_{timestamp}.{extension}"
-
+
output_path = Path(output)
-
+
# Get export data
- export_data = _prepare_export_data(
- registry_service, include_metadata, status
- )
-
+ export_data = _prepare_export_data(registry_service, include_metadata, status)
+
# Export based on format
if output_format.lower() == "json":
_export_json(export_data, output_path, compress, feedback)
@@ -95,22 +88,25 @@ def export(
_export_csv(export_data, output_path, compress, feedback)
elif output_format.lower() == "xml":
_export_xml(export_data, output_path, compress, feedback)
-
+
feedback.success(
"Export Complete",
- f"Registry exported to {output_path} ({_format_file_size(output_path)})"
+ f"Registry exported to {output_path} ({_format_file_size(output_path)})",
)
-
+
except Exception as e:
feedback.error("Export Error", f"Failed to export registry: {e}")
raise click.Abort()
-def _prepare_export_data(registry_service, include_metadata: bool, status_filter: tuple[str, ...]) -> dict:
+def _prepare_export_data(
+ registry_service, include_metadata: bool, status_filter: tuple[str, ...]
+) -> dict:
"""Prepare data for export based on options."""
-
+
# Convert status filter to enums
from .....libs.media_api.types import UserMediaListStatus
+
status_map = {
"watching": UserMediaListStatus.WATCHING,
"completed": UserMediaListStatus.COMPLETED,
@@ -119,9 +115,9 @@ def _prepare_export_data(registry_service, include_metadata: bool, status_filter
"paused": UserMediaListStatus.PAUSED,
"repeating": UserMediaListStatus.REPEATING,
}
-
+
status_enums = [status_map[s] for s in status_filter] if status_filter else None
-
+
export_data = {
"metadata": {
"export_timestamp": datetime.now().isoformat(),
@@ -130,19 +126,19 @@ def _prepare_export_data(registry_service, include_metadata: bool, status_filter
"filtered_status": list(status_filter) if status_filter else None,
},
"statistics": registry_service.get_registry_stats(),
- "media": []
+ "media": [],
}
-
+
# Get all records and filter by status if specified
all_records = registry_service.get_all_media_records()
-
+
for record in all_records:
index_entry = registry_service.get_media_index_entry(record.media_item.id)
-
+
# Skip if status filter is specified and doesn't match
if status_enums and (not index_entry or index_entry.status not in status_enums):
continue
-
+
media_data = {
"id": record.media_item.id,
"title": {
@@ -151,36 +147,63 @@ def _prepare_export_data(registry_service, include_metadata: bool, status_filter
"native": record.media_item.title.native,
},
"user_status": {
- "status": index_entry.status.value if index_entry and index_entry.status else None,
+ "status": index_entry.status.value
+ if index_entry and index_entry.status
+ else None,
"progress": index_entry.progress if index_entry else None,
"score": index_entry.score if index_entry else None,
- "last_watched": index_entry.last_watched.isoformat() if index_entry and index_entry.last_watched else None,
+ "last_watched": index_entry.last_watched.isoformat()
+ if index_entry and index_entry.last_watched
+ else None,
"notes": index_entry.notes if index_entry else None,
- }
+ },
}
-
+
if include_metadata:
- media_data.update({
- "format": record.media_item.format.value if record.media_item.format else None,
- "episodes": record.media_item.episodes,
- "duration": record.media_item.duration,
- "status": record.media_item.status.value if record.media_item.status else None,
- "start_date": record.media_item.start_date.isoformat() if record.media_item.start_date else None,
- "end_date": record.media_item.end_date.isoformat() if record.media_item.end_date else None,
- "average_score": record.media_item.average_score,
- "popularity": record.media_item.popularity,
- "genres": [genre.value for genre in record.media_item.genres],
- "tags": [{"name": tag.name.value, "rank": tag.rank} for tag in record.media_item.tags],
- "studios": [studio.name for studio in record.media_item.studios if studio.name],
- "description": record.media_item.description,
- "cover_image": {
- "large": record.media_item.cover_image.large if record.media_item.cover_image else None,
- "medium": record.media_item.cover_image.medium if record.media_item.cover_image else None,
- } if record.media_item.cover_image else None,
- })
-
+ media_data.update(
+ {
+ "format": record.media_item.format.value
+ if record.media_item.format
+ else None,
+ "episodes": record.media_item.episodes,
+ "duration": record.media_item.duration,
+ "status": record.media_item.status.value
+ if record.media_item.status
+ else None,
+ "start_date": record.media_item.start_date.isoformat()
+ if record.media_item.start_date
+ else None,
+ "end_date": record.media_item.end_date.isoformat()
+ if record.media_item.end_date
+ else None,
+ "average_score": record.media_item.average_score,
+ "popularity": record.media_item.popularity,
+ "genres": [genre.value for genre in record.media_item.genres],
+ "tags": [
+ {"name": tag.name.value, "rank": tag.rank}
+ for tag in record.media_item.tags
+ ],
+ "studios": [
+ studio.name
+ for studio in record.media_item.studios
+ if studio.name
+ ],
+ "description": record.media_item.description,
+ "cover_image": {
+ "large": record.media_item.cover_image.large
+ if record.media_item.cover_image
+ else None,
+ "medium": record.media_item.cover_image.medium
+ if record.media_item.cover_image
+ else None,
+ }
+ if record.media_item.cover_image
+ else None,
+ }
+ )
+
export_data["media"].append(media_data)
-
+
return export_data
@@ -188,10 +211,11 @@ def _export_json(data: dict, output_path: Path, compress: bool, feedback):
"""Export data to JSON format."""
if compress:
import gzip
- with gzip.open(output_path, 'wt', encoding='utf-8') as f:
+
+ with gzip.open(output_path, "wt", encoding="utf-8") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
else:
- with open(output_path, 'w', encoding='utf-8') as f:
+ with open(output_path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
@@ -199,21 +223,38 @@ def _export_csv(data: dict, output_path: Path, compress: bool, feedback):
"""Export data to CSV format."""
# Flatten media data for CSV
fieldnames = [
- "id", "title_english", "title_romaji", "title_native",
- "status", "progress", "score", "last_watched", "notes"
+ "id",
+ "title_english",
+ "title_romaji",
+ "title_native",
+ "status",
+ "progress",
+ "score",
+ "last_watched",
+ "notes",
]
-
+
# Add metadata fields if included
if data["metadata"]["include_metadata"]:
- fieldnames.extend([
- "format", "episodes", "duration", "media_status", "start_date", "end_date",
- "average_score", "popularity", "genres", "description"
- ])
-
+ fieldnames.extend(
+ [
+ "format",
+ "episodes",
+ "duration",
+ "media_status",
+ "start_date",
+ "end_date",
+ "average_score",
+ "popularity",
+ "genres",
+ "description",
+ ]
+ )
+
def write_csv(file_obj):
writer = csv.DictWriter(file_obj, fieldnames=fieldnames)
writer.writeheader()
-
+
for media in data["media"]:
row = {
"id": media["id"],
@@ -226,29 +267,32 @@ def _export_csv(data: dict, output_path: Path, compress: bool, feedback):
"last_watched": media["user_status"]["last_watched"],
"notes": media["user_status"]["notes"],
}
-
+
if data["metadata"]["include_metadata"]:
- row.update({
- "format": media.get("format"),
- "episodes": media.get("episodes"),
- "duration": media.get("duration"),
- "media_status": media.get("status"),
- "start_date": media.get("start_date"),
- "end_date": media.get("end_date"),
- "average_score": media.get("average_score"),
- "popularity": media.get("popularity"),
- "genres": ",".join(media.get("genres", [])),
- "description": media.get("description"),
- })
-
+ row.update(
+ {
+ "format": media.get("format"),
+ "episodes": media.get("episodes"),
+ "duration": media.get("duration"),
+ "media_status": media.get("status"),
+ "start_date": media.get("start_date"),
+ "end_date": media.get("end_date"),
+ "average_score": media.get("average_score"),
+ "popularity": media.get("popularity"),
+ "genres": ",".join(media.get("genres", [])),
+ "description": media.get("description"),
+ }
+ )
+
writer.writerow(row)
-
+
if compress:
import gzip
- with gzip.open(output_path, 'wt', encoding='utf-8', newline='') as f:
+
+ with gzip.open(output_path, "wt", encoding="utf-8", newline="") as f:
write_csv(f)
else:
- with open(output_path, 'w', encoding='utf-8', newline='') as f:
+ with open(output_path, "w", encoding="utf-8", newline="") as f:
write_csv(f)
@@ -259,43 +303,43 @@ def _export_xml(data: dict, output_path: Path, compress: bool, feedback):
except ImportError:
feedback.error("XML Export Error", "XML export requires Python's xml module")
raise click.Abort()
-
+
root = ET.Element("fastanime_registry")
-
+
# Add metadata
metadata_elem = ET.SubElement(root, "metadata")
for key, value in data["metadata"].items():
if value is not None:
elem = ET.SubElement(metadata_elem, key)
elem.text = str(value)
-
+
# Add statistics
stats_elem = ET.SubElement(root, "statistics")
for key, value in data["statistics"].items():
if value is not None:
elem = ET.SubElement(stats_elem, key)
elem.text = str(value)
-
+
# Add media
media_list_elem = ET.SubElement(root, "media_list")
for media in data["media"]:
media_elem = ET.SubElement(media_list_elem, "media")
media_elem.set("id", str(media["id"]))
-
+
# Add titles
titles_elem = ET.SubElement(media_elem, "titles")
for title_type, title_value in media["title"].items():
if title_value:
title_elem = ET.SubElement(titles_elem, title_type)
title_elem.text = title_value
-
+
# Add user status
status_elem = ET.SubElement(media_elem, "user_status")
for key, value in media["user_status"].items():
if value is not None:
elem = ET.SubElement(status_elem, key)
elem.text = str(value)
-
+
# Add metadata if included
if data["metadata"]["include_metadata"]:
for key, value in media.items():
@@ -314,22 +358,23 @@ def _export_xml(data: dict, output_path: Path, compress: bool, feedback):
else:
elem = ET.SubElement(media_elem, key)
elem.text = str(value)
-
+
# Write XML
tree = ET.ElementTree(root)
if compress:
import gzip
- with gzip.open(output_path, 'wb') as f:
- tree.write(f, encoding='utf-8', xml_declaration=True)
+
+ with gzip.open(output_path, "wb") as f:
+ tree.write(f, encoding="utf-8", xml_declaration=True)
else:
- tree.write(output_path, encoding='utf-8', xml_declaration=True)
+ tree.write(output_path, encoding="utf-8", xml_declaration=True)
def _format_file_size(file_path: Path) -> str:
"""Format file size in human-readable format."""
try:
size = file_path.stat().st_size
- for unit in ['B', 'KB', 'MB', 'GB']:
+ for unit in ["B", "KB", "MB", "GB"]:
if size < 1024.0:
return f"{size:.1f} {unit}"
size /= 1024.0
diff --git a/fastanime/cli/commands/registry/commands/import_.py b/fastanime/cli/commands/registry/commands/import_.py
index 8484a41..2a570b8 100644
--- a/fastanime/cli/commands/registry/commands/import_.py
+++ b/fastanime/cli/commands/registry/commands/import_.py
@@ -22,34 +22,26 @@ from ....utils.feedback import create_feedback_manager
"input_format",
type=click.Choice(["json", "csv", "xml", "auto"], case_sensitive=False),
default="auto",
- help="Input format (auto-detect if not specified)"
+ help="Input format (auto-detect if not specified)",
)
@click.option(
- "--merge",
- is_flag=True,
- help="Merge with existing registry (default: replace)"
+ "--merge", is_flag=True, help="Merge with existing registry (default: replace)"
)
@click.option(
- "--dry-run",
- is_flag=True,
- help="Show what would be imported without making changes"
+ "--dry-run", is_flag=True, help="Show what would be imported without making changes"
)
@click.option(
"--force",
"-f",
is_flag=True,
- help="Force import even if format version doesn't match"
-)
-@click.option(
- "--backup",
- is_flag=True,
- help="Create backup before importing"
+ help="Force import even if format version doesn't match",
)
+@click.option("--backup", is_flag=True, help="Create backup before importing")
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to import to"
+ help="Media API registry to import to",
)
@click.pass_obj
def import_(
@@ -60,50 +52,50 @@ def import_(
dry_run: bool,
force: bool,
backup: bool,
- api: str
+ api: str,
):
"""
Import media registry data from various formats.
-
+
Supports JSON, CSV, and XML formats exported by the export command
or compatible third-party tools.
"""
feedback = create_feedback_manager(config.general.icons)
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
-
+
# Create backup if requested
if backup and not dry_run:
_create_backup(registry_service, feedback)
-
+
# Auto-detect format if needed
if input_format == "auto":
input_format = _detect_format(input_file)
- feedback.info("Format Detection", f"Detected format: {input_format.upper()}")
-
+ feedback.info(
+ "Format Detection", f"Detected format: {input_format.upper()}"
+ )
+
# Parse input file
import_data = _parse_input_file(input_file, input_format, feedback)
-
+
# Validate import data
_validate_import_data(import_data, force, feedback)
-
+
# Import data
- _import_data(
- registry_service, import_data, merge, dry_run, feedback
- )
-
+ _import_data(registry_service, import_data, merge, dry_run, feedback)
+
if not dry_run:
feedback.success(
"Import Complete",
- f"Successfully imported {len(import_data.get('media', []))} media entries"
+ f"Successfully imported {len(import_data.get('media', []))} media entries",
)
else:
feedback.info(
"Dry Run Complete",
- f"Would import {len(import_data.get('media', []))} media entries"
+ f"Would import {len(import_data.get('media', []))} media entries",
)
-
+
except Exception as e:
feedback.error("Import Error", f"Failed to import registry: {e}")
raise click.Abort()
@@ -112,40 +104,40 @@ def import_(
def _create_backup(registry_service, feedback):
"""Create a backup before importing."""
from .export import _prepare_export_data, _export_json
-
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_path = Path(f"fastanime_registry_backup_{timestamp}.json")
-
+
export_data = _prepare_export_data(registry_service, True, ())
_export_json(export_data, backup_path, False, feedback)
-
+
feedback.info("Backup Created", f"Registry backed up to {backup_path}")
def _detect_format(file_path: Path) -> str:
"""Auto-detect file format based on extension and content."""
extension = file_path.suffix.lower()
-
- if extension in ['.json', '.gz']:
+
+ if extension in [".json", ".gz"]:
return "json"
- elif extension == '.csv':
+ elif extension == ".csv":
return "csv"
- elif extension == '.xml':
+ elif extension == ".xml":
return "xml"
-
+
# Try to detect by content
try:
- with open(file_path, 'r', encoding='utf-8') as f:
+ with open(file_path, "r", encoding="utf-8") as f:
content = f.read(100).strip()
- if content.startswith('{') or content.startswith('['):
+ if content.startswith("{") or content.startswith("["):
return "json"
-            elif content.startswith('<?xml'):
+            elif content.startswith("<?xml"):
                 return "xml"
def _parse_json(file_path: Path) -> dict:
"""Parse JSON input file."""
try:
- if file_path.suffix.lower() == '.gz':
+ if file_path.suffix.lower() == ".gz":
import gzip
- with gzip.open(file_path, 'rt', encoding='utf-8') as f:
+
+ with gzip.open(file_path, "rt", encoding="utf-8") as f:
return json.load(f)
else:
- with open(file_path, 'r', encoding='utf-8') as f:
+ with open(file_path, "r", encoding="utf-8") as f:
return json.load(f)
except json.JSONDecodeError as e:
raise click.ClickException(f"Invalid JSON format: {e}")
@@ -182,11 +175,11 @@ def _parse_csv(file_path: Path) -> dict:
"import_timestamp": datetime.now().isoformat(),
"source_format": "csv",
},
- "media": []
+ "media": [],
}
-
+
try:
- with open(file_path, 'r', encoding='utf-8') as f:
+ with open(file_path, "r", encoding="utf-8") as f:
reader = csv.DictReader(f)
for row in reader:
media_data = {
@@ -198,33 +191,47 @@ def _parse_csv(file_path: Path) -> dict:
},
"user_status": {
"status": row.get("status"),
- "progress": int(row["progress"]) if row.get("progress") else None,
+ "progress": int(row["progress"])
+ if row.get("progress")
+ else None,
"score": float(row["score"]) if row.get("score") else None,
"last_watched": row.get("last_watched"),
"notes": row.get("notes"),
- }
+ },
}
-
+
# Add metadata fields if present
if "format" in row:
- media_data.update({
- "format": row.get("format"),
- "episodes": int(row["episodes"]) if row.get("episodes") else None,
- "duration": int(row["duration"]) if row.get("duration") else None,
- "media_status": row.get("media_status"),
- "start_date": row.get("start_date"),
- "end_date": row.get("end_date"),
- "average_score": float(row["average_score"]) if row.get("average_score") else None,
- "popularity": int(row["popularity"]) if row.get("popularity") else None,
- "genres": row.get("genres", "").split(",") if row.get("genres") else [],
- "description": row.get("description"),
- })
-
+ media_data.update(
+ {
+ "format": row.get("format"),
+ "episodes": int(row["episodes"])
+ if row.get("episodes")
+ else None,
+ "duration": int(row["duration"])
+ if row.get("duration")
+ else None,
+ "media_status": row.get("media_status"),
+ "start_date": row.get("start_date"),
+ "end_date": row.get("end_date"),
+ "average_score": float(row["average_score"])
+ if row.get("average_score")
+ else None,
+ "popularity": int(row["popularity"])
+ if row.get("popularity")
+ else None,
+ "genres": row.get("genres", "").split(",")
+ if row.get("genres")
+ else [],
+ "description": row.get("description"),
+ }
+ )
+
import_data["media"].append(media_data)
-
+
except (ValueError, KeyError) as e:
raise click.ClickException(f"Invalid CSV format: {e}")
-
+
return import_data
@@ -234,22 +241,19 @@ def _parse_xml(file_path: Path) -> dict:
import xml.etree.ElementTree as ET
except ImportError:
raise click.ClickException("XML import requires Python's xml module")
-
+
try:
tree = ET.parse(file_path)
root = tree.getroot()
-
- import_data = {
- "metadata": {},
- "media": []
- }
-
+
+ import_data = {"metadata": {}, "media": []}
+
# Parse metadata
metadata_elem = root.find("metadata")
if metadata_elem is not None:
for child in metadata_elem:
import_data["metadata"][child.tag] = child.text
-
+
# Parse media
media_list_elem = root.find("media_list")
if media_list_elem is not None:
@@ -257,15 +261,15 @@ def _parse_xml(file_path: Path) -> dict:
media_data = {
"id": int(media_elem.get("id")),
"title": {},
- "user_status": {}
+ "user_status": {},
}
-
+
# Parse titles
titles_elem = media_elem.find("titles")
if titles_elem is not None:
for title_elem in titles_elem:
media_data["title"][title_elem.tag] = title_elem.text
-
+
# Parse user status
status_elem = media_elem.find("user_status")
if status_elem is not None:
@@ -273,32 +277,38 @@ def _parse_xml(file_path: Path) -> dict:
value = child.text
if child.tag in ["progress", "score"] and value:
try:
- value = float(value) if child.tag == "score" else int(value)
+ value = (
+ float(value) if child.tag == "score" else int(value)
+ )
except ValueError:
pass
media_data["user_status"][child.tag] = value
-
+
# Parse other metadata
for child in media_elem:
if child.tag not in ["titles", "user_status"]:
if child.tag in ["episodes", "duration", "popularity"]:
try:
- media_data[child.tag] = int(child.text) if child.text else None
+ media_data[child.tag] = (
+ int(child.text) if child.text else None
+ )
except ValueError:
media_data[child.tag] = child.text
elif child.tag == "average_score":
try:
- media_data[child.tag] = float(child.text) if child.text else None
+ media_data[child.tag] = (
+ float(child.text) if child.text else None
+ )
except ValueError:
media_data[child.tag] = child.text
else:
media_data[child.tag] = child.text
-
+
import_data["media"].append(media_data)
-
+
except ET.ParseError as e:
raise click.ClickException(f"Invalid XML format: {e}")
-
+
return import_data
@@ -306,36 +316,45 @@ def _validate_import_data(data: dict, force: bool, feedback):
"""Validate import data structure and compatibility."""
if "media" not in data:
raise click.ClickException("Import data missing 'media' section")
-
+
if not isinstance(data["media"], list):
raise click.ClickException("'media' section must be a list")
-
+
# Check if any media entries exist
if not data["media"]:
feedback.warning("No Media", "Import file contains no media entries")
return
-
+
# Validate media entries
required_fields = ["id", "title"]
for i, media in enumerate(data["media"]):
for field in required_fields:
if field not in media:
- raise click.ClickException(f"Media entry {i} missing required field: {field}")
-
+ raise click.ClickException(
+ f"Media entry {i} missing required field: {field}"
+ )
+
if not isinstance(media.get("title"), dict):
raise click.ClickException(f"Media entry {i} has invalid title format")
-
- feedback.info("Validation", f"Import data validated - {len(data['media'])} media entries")
+
+ feedback.info(
+ "Validation", f"Import data validated - {len(data['media'])} media entries"
+ )
def _import_data(registry_service, data: dict, merge: bool, dry_run: bool, feedback):
"""Import data into the registry."""
- from .....libs.media_api.types import MediaFormat, MediaGenre, MediaStatus, MediaType
-
+ from .....libs.media_api.types import (
+ MediaFormat,
+ MediaGenre,
+ MediaStatus,
+ MediaType,
+ )
+
imported_count = 0
updated_count = 0
error_count = 0
-
+
status_map = {
"watching": UserMediaListStatus.WATCHING,
"completed": UserMediaListStatus.COMPLETED,
@@ -344,47 +363,47 @@ def _import_data(registry_service, data: dict, merge: bool, dry_run: bool, feedb
"paused": UserMediaListStatus.PAUSED,
"repeating": UserMediaListStatus.REPEATING,
}
-
+
for media_data in data["media"]:
try:
media_id = media_data["id"]
if not media_id:
error_count += 1
continue
-
+
title_data = media_data.get("title", {})
title = MediaTitle(
english=title_data.get("english") or "",
romaji=title_data.get("romaji"),
native=title_data.get("native"),
)
-
+
# Create minimal MediaItem for registry
media_item = MediaItem(
id=media_id,
title=title,
type=MediaType.ANIME, # Default to anime
)
-
+
# Add additional metadata if available
if "format" in media_data and media_data["format"]:
try:
media_item.format = getattr(MediaFormat, media_data["format"])
except (AttributeError, TypeError):
pass
-
+
if "episodes" in media_data:
media_item.episodes = media_data["episodes"]
-
+
if "average_score" in media_data:
media_item.average_score = media_data["average_score"]
-
+
if dry_run:
title_str = title.english or title.romaji or f"ID:{media_id}"
feedback.info("Would import", title_str)
imported_count += 1
continue
-
+
# Check if record exists
existing_record = registry_service.get_media_record(media_id)
if existing_record and not merge:
@@ -394,11 +413,11 @@ def _import_data(registry_service, data: dict, merge: bool, dry_run: bool, feedb
updated_count += 1
else:
imported_count += 1
-
+
# Create or update record
record = registry_service.get_or_create_record(media_item)
registry_service.save_media_record(record)
-
+
# Update user status if provided
user_status = media_data.get("user_status", {})
if user_status.get("status"):
@@ -412,14 +431,17 @@ def _import_data(registry_service, data: dict, merge: bool, dry_run: bool, feedb
score=user_status.get("score"),
notes=user_status.get("notes"),
)
-
+
except Exception as e:
error_count += 1
- feedback.warning("Import Error", f"Failed to import media {media_data.get('id', 'unknown')}: {e}")
+ feedback.warning(
+ "Import Error",
+ f"Failed to import media {media_data.get('id', 'unknown')}: {e}",
+ )
continue
-
+
if not dry_run:
feedback.info(
"Import Summary",
- f"Imported: {imported_count}, Updated: {updated_count}, Errors: {error_count}"
+ f"Imported: {imported_count}, Updated: {updated_count}, Errors: {error_count}",
)
diff --git a/fastanime/cli/commands/registry/commands/restore.py b/fastanime/cli/commands/registry/commands/restore.py
index 40c3c92..69cf96b 100644
--- a/fastanime/cli/commands/registry/commands/restore.py
+++ b/fastanime/cli/commands/registry/commands/restore.py
@@ -17,26 +17,19 @@ from ....utils.feedback import create_feedback_manager
@click.command(help="Restore registry from a backup file")
@click.argument("backup_file", type=click.Path(exists=True, path_type=Path))
@click.option(
- "--force",
- "-f",
- is_flag=True,
- help="Force restore even if current registry exists"
+ "--force", "-f", is_flag=True, help="Force restore even if current registry exists"
)
@click.option(
"--backup-current",
is_flag=True,
- help="Create backup of current registry before restoring"
-)
-@click.option(
- "--verify",
- is_flag=True,
- help="Verify backup integrity before restoring"
+ help="Create backup of current registry before restoring",
)
+@click.option("--verify", is_flag=True, help="Verify backup integrity before restoring")
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to restore to"
+ help="Media API registry to restore to",
)
@click.pass_obj
def restore(
@@ -45,57 +38,66 @@ def restore(
force: bool,
backup_current: bool,
verify: bool,
- api: str
+ api: str,
):
"""
Restore your media registry from a backup file.
-
+
Can restore from tar or zip backups created by the backup command.
Optionally creates a backup of the current registry before restoring.
"""
feedback = create_feedback_manager(config.general.icons)
-
+
try:
# Detect backup format
backup_format = _detect_backup_format(backup_file)
feedback.info("Backup Format", f"Detected {backup_format.upper()} format")
-
+
# Verify backup if requested
if verify:
if not _verify_backup(backup_file, backup_format, feedback):
- feedback.error("Verification Failed", "Backup file appears to be corrupted")
+ feedback.error(
+ "Verification Failed", "Backup file appears to be corrupted"
+ )
raise click.Abort()
feedback.success("Verification", "Backup file integrity verified")
-
+
# Check if current registry exists
registry_service = MediaRegistryService(api, config.registry)
registry_exists = _check_registry_exists(registry_service)
-
+
if registry_exists and not force:
if not click.confirm("Current registry exists. Continue with restore?"):
feedback.info("Restore Cancelled", "No changes were made")
return
-
+
# Create backup of current registry if requested
if backup_current and registry_exists:
_backup_current_registry(registry_service, api, feedback)
-
+
# Show restore summary
_show_restore_summary(backup_file, backup_format, feedback)
-
+
# Perform restore
_perform_restore(backup_file, backup_format, config, api, feedback)
-
- feedback.success("Restore Complete", "Registry has been successfully restored from backup")
-
+
+ feedback.success(
+ "Restore Complete", "Registry has been successfully restored from backup"
+ )
+
# Verify restored registry
try:
restored_service = MediaRegistryService(api, config.registry)
stats = restored_service.get_registry_stats()
- feedback.info("Restored Registry", f"Contains {stats.get('total_media', 0)} media entries")
+ feedback.info(
+ "Restored Registry",
+ f"Contains {stats.get('total_media', 0)} media entries",
+ )
except Exception as e:
- feedback.warning("Verification Warning", f"Could not verify restored registry: {e}")
-
+ feedback.warning(
+ "Verification Warning", f"Could not verify restored registry: {e}"
+ )
+
except Exception as e:
feedback.error("Restore Error", f"Failed to restore registry: {e}")
raise click.Abort()
@@ -103,27 +105,28 @@ def restore(
def _detect_backup_format(backup_file: Path) -> str:
"""Detect backup file format."""
- if backup_file.suffix.lower() in ['.tar', '.gz']:
+ if backup_file.suffix.lower() in [".tar", ".gz"]:
return "tar"
- elif backup_file.suffix.lower() == '.zip':
+ elif backup_file.suffix.lower() == ".zip":
return "zip"
- elif backup_file.name.endswith('.tar.gz'):
+ elif backup_file.name.endswith(".tar.gz"):
return "tar"
else:
# Try to detect by content
try:
- with tarfile.open(backup_file, 'r:*'):
+ with tarfile.open(backup_file, "r:*"):
return "tar"
except:
pass
-
+
try:
import zipfile
- with zipfile.ZipFile(backup_file, 'r'):
+
+ with zipfile.ZipFile(backup_file, "r"):
return "zip"
except:
pass
-
+
raise click.ClickException(f"Could not detect backup format for {backup_file}")
@@ -131,53 +134,68 @@ def _verify_backup(backup_file: Path, format_type: str, feedback) -> bool:
"""Verify backup file integrity."""
try:
if format_type == "tar":
- with tarfile.open(backup_file, 'r:*') as tar:
+ with tarfile.open(backup_file, "r:*") as tar:
# Check if essential files exist
names = tar.getnames()
- has_registry = any('registry/' in name for name in names)
- has_index = any('index/' in name for name in names)
- has_metadata = 'backup_metadata.json' in names
-
+ has_registry = any("registry/" in name for name in names)
+ has_index = any("index/" in name for name in names)
+ has_metadata = "backup_metadata.json" in names
+
if not (has_registry and has_index):
return False
-
+
# Try to read metadata if it exists
if has_metadata:
try:
- metadata_member = tar.getmember('backup_metadata.json')
+ metadata_member = tar.getmember("backup_metadata.json")
metadata_file = tar.extractfile(metadata_member)
if metadata_file:
import json
+
metadata = json.load(metadata_file)
- feedback.info("Backup Info", f"Created: {metadata.get('backup_timestamp', 'Unknown')}")
- feedback.info("Backup Info", f"Total Media: {metadata.get('total_media', 'Unknown')}")
+ feedback.info(
+ "Backup Info",
+ f"Created: {metadata.get('backup_timestamp', 'Unknown')}",
+ )
+ feedback.info(
+ "Backup Info",
+ f"Total Media: {metadata.get('total_media', 'Unknown')}",
+ )
except:
pass
-
+
else: # zip
import zipfile
- with zipfile.ZipFile(backup_file, 'r') as zip_file:
+
+ with zipfile.ZipFile(backup_file, "r") as zip_file:
names = zip_file.namelist()
- has_registry = any('registry/' in name for name in names)
- has_index = any('index/' in name for name in names)
- has_metadata = 'backup_metadata.json' in names
-
+ has_registry = any("registry/" in name for name in names)
+ has_index = any("index/" in name for name in names)
+ has_metadata = "backup_metadata.json" in names
+
if not (has_registry and has_index):
return False
-
+
# Try to read metadata
if has_metadata:
try:
- with zip_file.open('backup_metadata.json') as metadata_file:
+ with zip_file.open("backup_metadata.json") as metadata_file:
import json
+
metadata = json.load(metadata_file)
- feedback.info("Backup Info", f"Created: {metadata.get('backup_timestamp', 'Unknown')}")
- feedback.info("Backup Info", f"Total Media: {metadata.get('total_media', 'Unknown')}")
+ feedback.info(
+ "Backup Info",
+ f"Created: {metadata.get('backup_timestamp', 'Unknown')}",
+ )
+ feedback.info(
+ "Backup Info",
+ f"Total Media: {metadata.get('total_media', 'Unknown')}",
+ )
except:
pass
-
+
return True
-
+
except Exception:
return False
@@ -186,7 +204,7 @@ def _check_registry_exists(registry_service) -> bool:
"""Check if a registry already exists."""
try:
stats = registry_service.get_registry_stats()
- return stats.get('total_media', 0) > 0
+ return stats.get("total_media", 0) > 0
except:
return False
@@ -194,10 +212,10 @@ def _check_registry_exists(registry_service) -> bool:
def _backup_current_registry(registry_service, api: str, feedback):
"""Create backup of current registry before restoring."""
from .backup import _create_tar_backup
-
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_path = Path(f"fastanime_registry_pre_restore_{api}_{timestamp}.tar.gz")
-
+
try:
_create_tar_backup(registry_service, backup_path, True, False, feedback, api)
feedback.info("Current Registry Backed Up", f"Saved to {backup_path}")
@@ -209,72 +227,89 @@ def _show_restore_summary(backup_file: Path, format_type: str, feedback):
"""Show summary of what will be restored."""
try:
if format_type == "tar":
- with tarfile.open(backup_file, 'r:*') as tar:
+ with tarfile.open(backup_file, "r:*") as tar:
members = tar.getmembers()
file_count = len([m for m in members if m.isfile()])
-
+
# Count media files
- media_files = len([m for m in members if m.name.startswith('registry/') and m.name.endswith('.json')])
-
+ media_files = len(
+ [
+ m
+ for m in members
+ if m.name.startswith("registry/") and m.name.endswith(".json")
+ ]
+ )
+
else: # zip
import zipfile
- with zipfile.ZipFile(backup_file, 'r') as zip_file:
+
+ with zipfile.ZipFile(backup_file, "r") as zip_file:
info_list = zip_file.infolist()
file_count = len([info for info in info_list if not info.is_dir()])
-
+
# Count media files
- media_files = len([info for info in info_list if info.filename.startswith('registry/') and info.filename.endswith('.json')])
-
+ media_files = len(
+ [
+ info
+ for info in info_list
+ if info.filename.startswith("registry/")
+ and info.filename.endswith(".json")
+ ]
+ )
+
feedback.info("Restore Preview", f"Will restore {file_count} files")
feedback.info("Media Records", f"Contains {media_files} media entries")
-
+
except Exception as e:
feedback.warning("Preview Error", f"Could not analyze backup: {e}")
-def _perform_restore(backup_file: Path, format_type: str, config: AppConfig, api: str, feedback):
+def _perform_restore(
+ backup_file: Path, format_type: str, config: AppConfig, api: str, feedback
+):
"""Perform the actual restore operation."""
-
+
# Create temporary extraction directory
temp_dir = Path(config.registry.media_dir.parent / "restore_temp")
temp_dir.mkdir(exist_ok=True)
-
+
try:
# Extract backup
if format_type == "tar":
- with tarfile.open(backup_file, 'r:*') as tar:
+ with tarfile.open(backup_file, "r:*") as tar:
tar.extractall(temp_dir)
else: # zip
import zipfile
- with zipfile.ZipFile(backup_file, 'r') as zip_file:
+
+ with zipfile.ZipFile(backup_file, "r") as zip_file:
zip_file.extractall(temp_dir)
-
+
feedback.info("Extraction", "Backup extracted to temporary directory")
-
+
# Remove existing registry if it exists
registry_dir = config.registry.media_dir / api
index_dir = config.registry.index_dir
-
+
if registry_dir.exists():
shutil.rmtree(registry_dir)
feedback.info("Cleanup", "Removed existing registry data")
-
+
if index_dir.exists():
shutil.rmtree(index_dir)
feedback.info("Cleanup", "Removed existing index data")
-
+
# Move extracted files to proper locations
extracted_registry = temp_dir / "registry" / api
extracted_index = temp_dir / "index"
-
+
if extracted_registry.exists():
shutil.move(str(extracted_registry), str(registry_dir))
feedback.info("Restore", "Registry data restored")
-
+
if extracted_index.exists():
shutil.move(str(extracted_index), str(index_dir))
feedback.info("Restore", "Index data restored")
-
+
# Restore cache if it exists
extracted_cache = temp_dir / "cache"
if extracted_cache.exists():
@@ -283,7 +318,7 @@ def _perform_restore(backup_file: Path, format_type: str, config: AppConfig, api
shutil.rmtree(cache_dir)
shutil.move(str(extracted_cache), str(cache_dir))
feedback.info("Restore", "Cache data restored")
-
+
finally:
# Clean up temporary directory
if temp_dir.exists():
diff --git a/fastanime/cli/commands/registry/commands/search.py b/fastanime/cli/commands/registry/commands/search.py
index 4689582..68a0697 100644
--- a/fastanime/cli/commands/registry/commands/search.py
+++ b/fastanime/cli/commands/registry/commands/search.py
@@ -17,63 +17,44 @@ from ....utils.feedback import create_feedback_manager
@click.argument("query", required=False)
@click.option(
"--status",
- type=click.Choice([
- "watching", "completed", "planning", "dropped", "paused", "repeating"
- ], case_sensitive=False),
- help="Filter by watch status"
+ type=click.Choice(
+ ["watching", "completed", "planning", "dropped", "paused", "repeating"],
+ case_sensitive=False,
+ ),
+ help="Filter by watch status",
)
@click.option(
- "--genre",
- multiple=True,
- help="Filter by genre (can be used multiple times)"
+ "--genre", multiple=True, help="Filter by genre (can be used multiple times)"
)
@click.option(
"--format",
- type=click.Choice([
- "TV", "TV_SHORT", "MOVIE", "SPECIAL", "OVA", "ONA", "MUSIC"
- ], case_sensitive=False),
- help="Filter by format"
-)
-@click.option(
- "--year",
- type=int,
- help="Filter by release year"
-)
-@click.option(
- "--min-score",
- type=float,
- help="Minimum average score (0.0 - 10.0)"
-)
-@click.option(
- "--max-score",
- type=float,
- help="Maximum average score (0.0 - 10.0)"
+ type=click.Choice(
+ ["TV", "TV_SHORT", "MOVIE", "SPECIAL", "OVA", "ONA", "MUSIC"],
+ case_sensitive=False,
+ ),
+ help="Filter by format",
)
+@click.option("--year", type=int, help="Filter by release year")
+@click.option("--min-score", type=float, help="Minimum average score (0.0 - 10.0)")
+@click.option("--max-score", type=float, help="Maximum average score (0.0 - 10.0)")
@click.option(
"--sort",
- type=click.Choice([
- "title", "score", "popularity", "year", "episodes", "updated"
- ], case_sensitive=False),
+ type=click.Choice(
+ ["title", "score", "popularity", "year", "episodes", "updated"],
+ case_sensitive=False,
+ ),
default="title",
- help="Sort results by field"
+ help="Sort results by field",
)
+@click.option("--limit", type=int, default=20, help="Maximum number of results to show")
@click.option(
- "--limit",
- type=int,
- default=20,
- help="Maximum number of results to show"
-)
-@click.option(
- "--json",
- "output_json",
- is_flag=True,
- help="Output results in JSON format"
+ "--json", "output_json", is_flag=True, help="Output results in JSON format"
)
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API registry to search"
+ help="Media API registry to search",
)
@click.pass_obj
def search(
@@ -88,39 +69,40 @@ def search(
sort: str,
limit: int,
output_json: bool,
- api: str
+ api: str,
):
"""
Search through your local media registry.
-
+
You can search by title and filter by various criteria like status,
genre, format, year, and score range.
"""
feedback = create_feedback_manager(config.general.icons)
console = Console()
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
-
+
# Build search parameters
search_params = _build_search_params(
query, status, genre, format, year, min_score, max_score, sort, limit
)
-
+
# Perform search
result = registry_service.search_for_media(search_params)
-
+
if not result or not result.media:
feedback.info("No Results", "No media found matching your criteria")
return
-
+
if output_json:
import json
+
print(json.dumps(result.model_dump(), indent=2, default=str))
return
-
+
_display_search_results(console, result, config.general.icons)
-
+
except Exception as e:
feedback.error("Search Error", f"Failed to search registry: {e}")
raise click.Abort()
@@ -130,20 +112,20 @@ def _build_search_params(
query, status, genre, format, year, min_score, max_score, sort, limit
) -> MediaSearchParams:
"""Build MediaSearchParams from command options."""
-
+
# Convert status string to enum
status_enum = None
if status:
status_map = {
"watching": UserMediaListStatus.WATCHING,
- "completed": UserMediaListStatus.COMPLETED,
+ "completed": UserMediaListStatus.COMPLETED,
"planning": UserMediaListStatus.PLANNING,
"dropped": UserMediaListStatus.DROPPED,
"paused": UserMediaListStatus.PAUSED,
"repeating": UserMediaListStatus.REPEATING,
}
status_enum = status_map.get(status.lower())
-
+
# Convert sort string to enum
sort_map = {
"title": MediaSort.TITLE_ROMAJI,
@@ -154,29 +136,33 @@ def _build_search_params(
"updated": MediaSort.UPDATED_AT_DESC,
}
sort_enum = sort_map.get(sort.lower(), MediaSort.TITLE_ROMAJI)
-
+
# Convert format string to enum if provided
format_enum = None
if format:
from .....libs.media_api.types import MediaFormat
+
format_enum = getattr(MediaFormat, format.upper(), None)
-
+
# Convert genre strings to enums
genre_enums = []
if genre:
from .....libs.media_api.types import MediaGenre
+
for g in genre:
# Try to find matching genre enum
for genre_enum in MediaGenre:
if genre_enum.value.lower() == g.lower():
genre_enums.append(genre_enum)
break
-
+
return MediaSearchParams(
query=query,
per_page=limit,
sort=[sort_enum],
- averageScore_greater=min_score * 10 if min_score else None, # Convert to AniList scale
+ averageScore_greater=min_score * 10
+ if min_score
+ else None, # Convert to AniList scale
averageScore_lesser=max_score * 10 if max_score else None,
genre_in=genre_enums if genre_enums else None,
format_in=[format_enum] if format_enum else None,
@@ -187,8 +173,10 @@ def _build_search_params(
def _display_search_results(console: Console, result, icons: bool):
"""Display search results in a formatted table."""
-
- table = Table(title=f"{'๐ ' if icons else ''}Search Results ({len(result.media)} found)")
+
+ table = Table(
+ title=f"{'๐ ' if icons else ''}Search Results ({len(result.media)} found)"
+ )
table.add_column("Title", style="cyan", min_width=30)
table.add_column("Year", style="dim", justify="center", min_width=6)
table.add_column("Format", style="magenta", justify="center", min_width=8)
@@ -196,31 +184,35 @@ def _display_search_results(console: Console, result, icons: bool):
table.add_column("Score", style="yellow", justify="center", min_width=6)
table.add_column("Status", style="blue", justify="center", min_width=10)
table.add_column("Progress", style="white", justify="center", min_width=8)
-
+
for media in result.media:
# Get title (prefer English, fallback to Romaji)
title = media.title.english or media.title.romaji or "Unknown"
if len(title) > 40:
title = title[:37] + "..."
-
+
# Get year from start date
year = ""
if media.start_date:
year = str(media.start_date.year)
-
+
# Format episodes
episodes = str(media.episodes) if media.episodes else "?"
-
+
# Format score
- score = f"{media.average_score/10:.1f}" if media.average_score else "N/A"
-
+ score = f"{media.average_score / 10:.1f}" if media.average_score else "N/A"
+
# Get user status
status = "Not Listed"
progress = "0"
if media.user_status:
- status = media.user_status.status.value.title() if media.user_status.status else "Unknown"
+ status = (
+ media.user_status.status.value.title()
+ if media.user_status.status
+ else "Unknown"
+ )
progress = f"{media.user_status.progress or 0}/{episodes}"
-
+
table.add_row(
title,
year,
@@ -228,11 +220,11 @@ def _display_search_results(console: Console, result, icons: bool):
episodes,
score,
status,
- progress
+ progress,
)
-
+
console.print(table)
-
+
# Show pagination info if applicable
if result.page_info.total > len(result.media):
console.print(
diff --git a/fastanime/cli/commands/registry/commands/stats.py b/fastanime/cli/commands/registry/commands/stats.py
index cd96cdf..6f2e02a 100644
--- a/fastanime/cli/commands/registry/commands/stats.py
+++ b/fastanime/cli/commands/registry/commands/stats.py
@@ -17,45 +17,43 @@ from ....utils.feedback import create_feedback_manager
"--detailed",
"-d",
is_flag=True,
- help="Show detailed breakdown by genre, format, and year"
+ help="Show detailed breakdown by genre, format, and year",
)
@click.option(
- "--json",
- "output_json",
- is_flag=True,
- help="Output statistics in JSON format"
+ "--json", "output_json", is_flag=True, help="Output statistics in JSON format"
)
@click.option(
"--api",
default="anilist",
type=click.Choice(["anilist"], case_sensitive=False),
- help="Media API to show stats for"
+ help="Media API to show stats for",
)
@click.pass_obj
def stats(config: AppConfig, detailed: bool, output_json: bool, api: str):
"""
Display comprehensive statistics about your local media registry.
-
- Shows total counts, status breakdown, and optionally detailed
+
+ Shows total counts, status breakdown, and optionally detailed
analysis by genre, format, and release year.
"""
feedback = create_feedback_manager(config.general.icons)
console = Console()
-
+
try:
registry_service = MediaRegistryService(api, config.registry)
stats_data = registry_service.get_registry_stats()
-
+
if output_json:
import json
+
print(json.dumps(stats_data, indent=2, default=str))
return
-
+
_display_stats_overview(console, stats_data, api, config.general.icons)
-
+
if detailed:
_display_detailed_stats(console, stats_data, config.general.icons)
-
+
except Exception as e:
feedback.error("Stats Error", f"Failed to generate statistics: {e}")
raise click.Abort()
@@ -63,118 +61,122 @@ def stats(config: AppConfig, detailed: bool, output_json: bool, api: str):
def _display_stats_overview(console: Console, stats: dict, api: str, icons: bool):
"""Display basic registry statistics overview."""
-
+
# Main overview panel
overview_text = f"[bold cyan]Media API:[/bold cyan] {api.title()}\n"
- overview_text += f"[bold cyan]Total Media:[/bold cyan] {stats.get('total_media', 0)}\n"
- overview_text += f"[bold cyan]Registry Version:[/bold cyan] {stats.get('version', 'Unknown')}\n"
- overview_text += f"[bold cyan]Last Updated:[/bold cyan] {stats.get('last_updated', 'Never')}\n"
- overview_text += f"[bold cyan]Storage Size:[/bold cyan] {stats.get('storage_size', 'Unknown')}"
-
+ overview_text += (
+ f"[bold cyan]Total Media:[/bold cyan] {stats.get('total_media', 0)}\n"
+ )
+ overview_text += (
+ f"[bold cyan]Registry Version:[/bold cyan] {stats.get('version', 'Unknown')}\n"
+ )
+ overview_text += (
+ f"[bold cyan]Last Updated:[/bold cyan] {stats.get('last_updated', 'Never')}\n"
+ )
+ overview_text += (
+ f"[bold cyan]Storage Size:[/bold cyan] {stats.get('storage_size', 'Unknown')}"
+ )
+
panel = Panel(
overview_text,
title=f"{'๐ ' if icons else ''}Registry Overview",
- border_style="cyan"
+ border_style="cyan",
)
console.print(panel)
console.print()
-
+
# Status breakdown table
- status_breakdown = stats.get('status_breakdown', {})
+ status_breakdown = stats.get("status_breakdown", {})
if status_breakdown:
table = Table(title=f"{'๐ ' if icons else ''}Status Breakdown")
table.add_column("Status", style="cyan", no_wrap=True)
table.add_column("Count", style="magenta", justify="right")
table.add_column("Percentage", style="green", justify="right")
-
+
total = sum(status_breakdown.values())
for status, count in sorted(status_breakdown.items()):
percentage = (count / total * 100) if total > 0 else 0
- table.add_row(
- status.title(),
- str(count),
- f"{percentage:.1f}%"
- )
-
+ table.add_row(status.title(), str(count), f"{percentage:.1f}%")
+
console.print(table)
console.print()
-
+
# Download status breakdown
- download_stats = stats.get('download_stats', {})
+ download_stats = stats.get("download_stats", {})
if download_stats:
table = Table(title=f"{'๐พ ' if icons else ''}Download Status")
table.add_column("Status", style="cyan", no_wrap=True)
table.add_column("Count", style="magenta", justify="right")
-
+
for status, count in download_stats.items():
table.add_row(status.title(), str(count))
-
+
console.print(table)
console.print()
def _display_detailed_stats(console: Console, stats: dict, icons: bool):
"""Display detailed breakdown by various categories."""
-
+
# Genre breakdown
- genre_breakdown = stats.get('genre_breakdown', {})
+ genre_breakdown = stats.get("genre_breakdown", {})
if genre_breakdown:
table = Table(title=f"{'๐ญ ' if icons else ''}Top Genres")
table.add_column("Genre", style="cyan")
table.add_column("Count", style="magenta", justify="right")
-
+
# Sort by count and show top 10
- top_genres = sorted(genre_breakdown.items(), key=lambda x: x[1], reverse=True)[:10]
+ top_genres = sorted(genre_breakdown.items(), key=lambda x: x[1], reverse=True)[
+ :10
+ ]
for genre, count in top_genres:
table.add_row(genre, str(count))
-
+
console.print(table)
console.print()
-
+
# Format breakdown
- format_breakdown = stats.get('format_breakdown', {})
+ format_breakdown = stats.get("format_breakdown", {})
if format_breakdown:
table = Table(title=f"{'๐บ ' if icons else ''}Format Breakdown")
table.add_column("Format", style="cyan")
table.add_column("Count", style="magenta", justify="right")
table.add_column("Percentage", style="green", justify="right")
-
+
total = sum(format_breakdown.values())
for format_type, count in sorted(format_breakdown.items()):
percentage = (count / total * 100) if total > 0 else 0
- table.add_row(
- format_type,
- str(count),
- f"{percentage:.1f}%"
- )
-
+ table.add_row(format_type, str(count), f"{percentage:.1f}%")
+
console.print(table)
console.print()
-
+
# Year breakdown
- year_breakdown = stats.get('year_breakdown', {})
+ year_breakdown = stats.get("year_breakdown", {})
if year_breakdown:
table = Table(title=f"{'๐
' if icons else ''}Release Years (Top 10)")
table.add_column("Year", style="cyan", justify="center")
table.add_column("Count", style="magenta", justify="right")
-
+
# Sort by year descending and show top 10
- top_years = sorted(year_breakdown.items(), key=lambda x: x[0], reverse=True)[:10]
+ top_years = sorted(year_breakdown.items(), key=lambda x: x[0], reverse=True)[
+ :10
+ ]
for year, count in top_years:
table.add_row(str(year), str(count))
-
+
console.print(table)
console.print()
-
+
# Rating breakdown
- rating_breakdown = stats.get('rating_breakdown', {})
+ rating_breakdown = stats.get("rating_breakdown", {})
if rating_breakdown:
table = Table(title=f"{'โญ ' if icons else ''}Score Distribution")
table.add_column("Score Range", style="cyan")
table.add_column("Count", style="magenta", justify="right")
-
+
for score_range, count in sorted(rating_breakdown.items()):
table.add_row(score_range, str(count))
-
+
console.print(table)
console.print()
diff --git a/fastanime/cli/commands/search.py b/fastanime/cli/commands/search.py
index 522a0b0..d57a7fb 100644
--- a/fastanime/cli/commands/search.py
+++ b/fastanime/cli/commands/search.py
@@ -89,22 +89,23 @@ def search(config: AppConfig, **options: "Unpack[Options]"):
if not anime:
raise FastAnimeError(f"Failed to fetch anime {anime_result.title}")
-
+
available_episodes: list[str] = sorted(
getattr(anime.episodes, config.stream.translation_type), key=float
)
-
+
if options["episode_range"]:
from ..utils.parser import parse_episode_range
-
+
try:
episodes_range = parse_episode_range(
- options["episode_range"],
- available_episodes
+ options["episode_range"], available_episodes
)
-
+
for episode in episodes_range:
- stream_anime(config, provider, selector, anime, episode, anime_title)
+ stream_anime(
+ config, provider, selector, anime, episode, anime_title
+ )
except (ValueError, IndexError) as e:
raise FastAnimeError(f"Invalid episode range: {e}") from e
else:
diff --git a/fastanime/cli/commands/update.py b/fastanime/cli/commands/update.py
index 125d31b..bc31b4b 100644
--- a/fastanime/cli/commands/update.py
+++ b/fastanime/cli/commands/update.py
@@ -53,14 +53,20 @@ if TYPE_CHECKING:
)
@click.pass_context
@click.pass_obj
-def update(config: "AppConfig", ctx: click.Context, force: bool, check_only: bool, release_notes: bool) -> None:
+def update(
+ config: "AppConfig",
+ ctx: click.Context,
+ force: bool,
+ check_only: bool,
+ release_notes: bool,
+) -> None:
"""
Update FastAnime to the latest version.
-
+
This command checks for available updates and optionally updates
the application to the latest version from the configured sources
(pip, uv, pipx, git, or nix depending on installation method).
-
+
Args:
config: The application configuration object
ctx: The click context containing CLI options
@@ -72,73 +78,83 @@ def update(config: "AppConfig", ctx: click.Context, force: bool, check_only: boo
if release_notes:
print("[cyan]Fetching latest release notes...[/]")
is_latest, release_json = check_for_updates()
-
+
if not release_json:
- print("[yellow]Could not fetch release information. Please check your internet connection.[/]")
+ print(
+ "[yellow]Could not fetch release information. Please check your internet connection.[/]"
+ )
sys.exit(1)
-
- version = release_json.get('tag_name', 'unknown')
- release_name = release_json.get('name', version)
- release_body = release_json.get('body', 'No release notes available.')
- published_at = release_json.get('published_at', 'unknown')
-
+
+ version = release_json.get("tag_name", "unknown")
+ release_name = release_json.get("name", version)
+ release_body = release_json.get("body", "No release notes available.")
+ published_at = release_json.get("published_at", "unknown")
+
console = Console()
-
+
print(f"[bold cyan]Release: {release_name}[/]")
print(f"[dim]Version: {version}[/]")
print(f"[dim]Published: {published_at}[/]")
print()
-
+
# Display release notes as markdown if available
if release_body.strip():
markdown = Markdown(release_body)
console.print(markdown)
else:
print("[dim]No release notes available for this version.[/]")
-
+
return
-
+
elif check_only:
print("[cyan]Checking for updates...[/]")
is_latest, release_json = check_for_updates()
-
+
if not release_json:
- print("[yellow]Could not check for updates. Please check your internet connection.[/]")
+ print(
+ "[yellow]Could not check for updates. Please check your internet connection.[/]"
+ )
sys.exit(1)
-
+
if is_latest:
print("[green]FastAnime is up to date![/]")
- print(f"[dim]Current version: {release_json.get('tag_name', 'unknown')}[/]")
+ print(
+ f"[dim]Current version: {release_json.get('tag_name', 'unknown')}[/]"
+ )
else:
- latest_version = release_json.get('tag_name', 'unknown')
+ latest_version = release_json.get("tag_name", "unknown")
print(f"[yellow]Update available: {latest_version}[/]")
print(f"[dim]Run 'fastanime update' to update[/]")
sys.exit(1)
else:
print("[cyan]Checking for updates and updating if necessary...[/]")
success, release_json = update_app(force=force)
-
+
if not release_json:
- print("[red]Could not check for updates. Please check your internet connection.[/]")
+ print(
+ "[red]Could not check for updates. Please check your internet connection.[/]"
+ )
sys.exit(1)
-
+
if success:
- latest_version = release_json.get('tag_name', 'unknown')
+ latest_version = release_json.get("tag_name", "unknown")
print(f"[green]Successfully updated to version {latest_version}![/]")
else:
if force:
- print("[red]Update failed. Please check the error messages above.[/]")
+ print(
+ "[red]Update failed. Please check the error messages above.[/]"
+ )
sys.exit(1)
# If not forced and update failed, it might be because already up to date
# The update_app function already prints appropriate messages
-
+
except KeyboardInterrupt:
print("\n[yellow]Update cancelled by user.[/]")
sys.exit(1)
except Exception as e:
print(f"[red]An error occurred during update: {e}[/]")
# Get trace option from parent context
- trace = ctx.parent.params.get('trace', False) if ctx.parent else False
+ trace = ctx.parent.params.get("trace", False) if ctx.parent else False
if trace:
raise
sys.exit(1)
diff --git a/fastanime/cli/interactive/menu/media/media_actions.py b/fastanime/cli/interactive/menu/media/media_actions.py
index 41cad29..a810c8b 100644
--- a/fastanime/cli/interactive/menu/media/media_actions.py
+++ b/fastanime/cli/interactive/menu/media/media_actions.py
@@ -177,7 +177,9 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
image.render_image(cover_image.large)
# Create main title
- main_title = media_item.title.english or media_item.title.romaji or "Unknown Title"
+ main_title = (
+ media_item.title.english or media_item.title.romaji or "Unknown Title"
+ )
title_text = Text(main_title, style="bold cyan")
# Create info table
@@ -189,7 +191,7 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
info_table.add_row("English Title", media_item.title.english or "N/A")
info_table.add_row("Romaji Title", media_item.title.romaji or "N/A")
info_table.add_row("Native Title", media_item.title.native or "N/A")
-
+
if media_item.synonymns:
synonyms = ", ".join(media_item.synonymns[:3]) # Show first 3 synonyms
if len(media_item.synonymns) > 3:
@@ -197,10 +199,19 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
info_table.add_row("Synonyms", synonyms)
info_table.add_row("Type", media_item.type.value if media_item.type else "N/A")
- info_table.add_row("Format", media_item.format.value if media_item.format else "N/A")
- info_table.add_row("Status", media_item.status.value if media_item.status else "N/A")
- info_table.add_row("Episodes", str(media_item.episodes) if media_item.episodes else "Unknown")
- info_table.add_row("Duration", f"{media_item.duration} min" if media_item.duration else "Unknown")
+ info_table.add_row(
+ "Format", media_item.format.value if media_item.format else "N/A"
+ )
+ info_table.add_row(
+ "Status", media_item.status.value if media_item.status else "N/A"
+ )
+ info_table.add_row(
+ "Episodes", str(media_item.episodes) if media_item.episodes else "Unknown"
+ )
+ info_table.add_row(
+ "Duration",
+ f"{media_item.duration} min" if media_item.duration else "Unknown",
+ )
# Add dates
if media_item.start_date:
@@ -229,63 +240,72 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
Text(genres_text, style="green"),
title="[bold]Genres[/bold]",
border_style="green",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
else:
genres_panel = Panel(
Text("No genres available", style="dim"),
title="[bold]Genres[/bold]",
border_style="green",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
# Create tags panel (show top tags)
if media_item.tags:
- top_tags = sorted(media_item.tags, key=lambda x: x.rank or 0, reverse=True)[:10]
+ top_tags = sorted(media_item.tags, key=lambda x: x.rank or 0, reverse=True)[
+ :10
+ ]
tags_text = ", ".join([tag.name.value for tag in top_tags])
tags_panel = Panel(
Text(tags_text, style="yellow"),
title="[bold]Tags[/bold]",
border_style="yellow",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
else:
tags_panel = Panel(
Text("No tags available", style="dim"),
title="[bold]Tags[/bold]",
border_style="yellow",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
# Create studios panel
if media_item.studios:
- studios_text = ", ".join([studio.name for studio in media_item.studios if studio.name])
+ studios_text = ", ".join(
+ [studio.name for studio in media_item.studios if studio.name]
+ )
studios_panel = Panel(
Text(studios_text, style="blue"),
title="[bold]Studios[/bold]",
border_style="blue",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
else:
studios_panel = Panel(
Text("No studio information", style="dim"),
title="[bold]Studios[/bold]",
border_style="blue",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
# Create description panel
description = media_item.description or "No description available"
# Clean HTML tags from description
- clean_description = re.sub(r'<[^>]+>', '', description)
+ clean_description = re.sub(r"<[^>]+>", "", description)
# Replace common HTML entities
- clean_description = clean_description.replace('"', '"').replace('&', '&').replace('<', '<').replace('>', '>')
-
+ clean_description = (
+ clean_description.replace(""", '"')
+ .replace("&", "&")
+ .replace("<", "<")
+ .replace(">", ">")
+ )
+
description_panel = Panel(
Text(clean_description, style="white"),
title="[bold]Description[/bold]",
border_style="cyan",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
# Create user status panel if available
@@ -293,22 +313,30 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
user_info_table = Table(show_header=False, box=box.SIMPLE)
user_info_table.add_column("Field", style="bold magenta")
user_info_table.add_column("Value", style="white")
-
+
if media_item.user_status.status:
- user_info_table.add_row("Status", media_item.user_status.status.value.title())
+ user_info_table.add_row(
+ "Status", media_item.user_status.status.value.title()
+ )
if media_item.user_status.progress is not None:
- progress = f"{media_item.user_status.progress}/{media_item.episodes or '?'}"
+ progress = (
+ f"{media_item.user_status.progress}/{media_item.episodes or '?'}"
+ )
user_info_table.add_row("Progress", progress)
if media_item.user_status.score:
- user_info_table.add_row("Your Score", f"{media_item.user_status.score}/10")
+ user_info_table.add_row(
+ "Your Score", f"{media_item.user_status.score}/10"
+ )
if media_item.user_status.repeat:
- user_info_table.add_row("Rewatched", f"{media_item.user_status.repeat} times")
+ user_info_table.add_row(
+ "Rewatched", f"{media_item.user_status.repeat} times"
+ )
user_panel = Panel(
user_info_table,
title="[bold]Your List Status[/bold]",
border_style="magenta",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
else:
user_panel = None
@@ -316,12 +344,15 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
# Create next airing panel if available
if media_item.next_airing:
from datetime import datetime
+
airing_info_table = Table(show_header=False, box=box.SIMPLE)
airing_info_table.add_column("Field", style="bold red")
airing_info_table.add_column("Value", style="white")
-
- airing_info_table.add_row("Next Episode", str(media_item.next_airing.episode))
-
+
+ airing_info_table.add_row(
+ "Next Episode", str(media_item.next_airing.episode)
+ )
+
if media_item.next_airing.airing_at:
air_date = media_item.next_airing.airing_at.strftime("%Y-%m-%d %H:%M")
airing_info_table.add_row("Air Date", air_date)
@@ -330,7 +361,7 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
airing_info_table,
title="[bold]Next Airing[/bold]",
border_style="red",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
else:
airing_panel = None
@@ -340,30 +371,30 @@ def _view_info(ctx: Context, state: State) -> MenuAction:
info_table,
title="[bold]Basic Information[/bold]",
border_style="cyan",
- box=box.ROUNDED
+ box=box.ROUNDED,
)
# Display everything
console.print(Panel(title_text, box=box.DOUBLE, border_style="bright_cyan"))
console.print()
-
+
# Create columns for better layout
panels_row1 = [info_panel, genres_panel]
if user_panel:
panels_row1.append(user_panel)
-
+
console.print(Columns(panels_row1, equal=True, expand=True))
console.print()
-
+
panels_row2 = [tags_panel, studios_panel]
if airing_panel:
panels_row2.append(airing_panel)
-
+
console.print(Columns(panels_row2, equal=True, expand=True))
console.print()
-
+
console.print(description_panel)
-
+
ctx.selector.ask("Press Enter to continue...")
return InternalDirective.RELOAD
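
The description-cleaning hunk above strips tags with a regex and then unescapes four named entities by hand. For reference, the stdlib can decode every named and numeric entity in one pass; a minimal sketch under that assumption (clean_html is an illustrative name, not something this diff introduces):

import html
import re

def clean_html(description: str) -> str:
    # Strip tags first, then decode all HTML entities in one pass.
    return html.unescape(re.sub(r"<[^>]+>", "", description))

print(clean_html("Cute girls doing cute things&hellip; &amp; more<br>"))
# -> 'Cute girls doing cute things… & more'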
@@ -479,7 +510,7 @@ def _view_characters(ctx: Context, state: State) -> MenuAction:
loading_message = "Fetching characters..."
characters_data = None
-
+
with feedback.progress(loading_message):
characters_data = ctx.media_api.get_characters_of(
MediaCharactersParams(id=media_item.id)
@@ -487,8 +518,8 @@ def _view_characters(ctx: Context, state: State) -> MenuAction:
if not characters_data or not characters_data.get("data"):
feedback.warning(
- "No character information found",
- "This anime doesn't have character data available"
+ "No character information found",
+ "This anime doesn't have character data available",
)
return InternalDirective.RELOAD
@@ -496,7 +527,7 @@ def _view_characters(ctx: Context, state: State) -> MenuAction:
# Extract characters from the nested response structure
page_data = characters_data["data"]["Page"]["media"][0]
characters = page_data["characters"]["nodes"]
-
+
if not characters:
feedback.warning("No characters found for this anime")
return InternalDirective.RELOAD
@@ -528,12 +559,15 @@ def _view_characters(ctx: Context, state: State) -> MenuAction:
gender = char.get("gender") or "Unknown"
age = str(char.get("age") or "Unknown")
favorites = str(char.get("favourites") or "0")
-
+
# Clean up description (remove HTML tags and truncate)
description = char.get("description") or "No description"
if description:
import re
- description = re.sub(r'<[^>]+>', '', description) # Remove HTML tags
+
+ description = re.sub(
+ r"<[^>]+>", "", description
+ ) # Remove HTML tags
if len(description) > 100:
description = description[:97] + "..."
@@ -542,12 +576,12 @@ def _view_characters(ctx: Context, state: State) -> MenuAction:
# Display in a panel
panel = Panel(table, title=title, border_style="blue")
console.print(panel)
-
+
ctx.selector.ask("Press Enter to continue...")
-
+
except (KeyError, IndexError, TypeError) as e:
feedback.error(f"Error displaying characters: {e}")
-
+
return InternalDirective.RELOAD
return action
@@ -564,7 +598,7 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
loading_message = "Fetching airing schedule..."
schedule_data = None
-
+
with feedback.progress(loading_message):
schedule_data = ctx.media_api.get_airing_schedule_for(
MediaAiringScheduleParams(id=media_item.id)
@@ -572,8 +606,8 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
if not schedule_data or not schedule_data.get("data"):
feedback.warning(
- "No airing schedule found",
- "This anime doesn't have upcoming episodes or airing data"
+ "No airing schedule found",
+ "This anime doesn't have upcoming episodes or airing data",
)
return InternalDirective.RELOAD
@@ -581,11 +615,11 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
# Extract schedule from the nested response structure
page_data = schedule_data["data"]["Page"]["media"][0]
schedule_nodes = page_data["airingSchedule"]["nodes"]
-
+
if not schedule_nodes:
feedback.info(
"No upcoming episodes",
- "This anime has no scheduled upcoming episodes"
+ "This anime has no scheduled upcoming episodes",
)
return InternalDirective.RELOAD
@@ -611,7 +645,7 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
for episode in schedule_nodes[:10]: # Show next 10 episodes
ep_num = str(episode.get("episode", "?"))
-
+
# Format air date
airing_at = episode.get("airingAt")
if airing_at:
@@ -619,14 +653,14 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
formatted_date = air_date.strftime("%Y-%m-%d %H:%M")
else:
formatted_date = "Unknown"
-
+
# Format time until airing
time_until = episode.get("timeUntilAiring")
if time_until:
days = time_until // 86400
hours = (time_until % 86400) // 3600
minutes = (time_until % 3600) // 60
-
+
if days > 0:
time_str = f"{days}d {hours}h {minutes}m"
elif hours > 0:
@@ -641,12 +675,12 @@ def _view_airing_schedule(ctx: Context, state: State) -> MenuAction:
# Display in a panel
panel = Panel(table, title=title, border_style="blue")
console.print(panel)
-
+
ctx.selector.ask("Press Enter to continue...")
-
+
except (KeyError, IndexError, TypeError) as e:
feedback.error(f"Error displaying airing schedule: {e}")
-
+
return InternalDirective.RELOAD
return action
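
The countdown formatting in the airing-schedule hunk above reduces timeUntilAiring (seconds) to a days/hours/minutes string. The same arithmetic as a standalone sketch (format_time_until is an illustrative name, not part of the diff):

def format_time_until(seconds: int) -> str:
    # 86400 seconds per day, 3600 per hour.
    days = seconds // 86400
    hours = (seconds % 86400) // 3600
    minutes = (seconds % 3600) // 60
    if days > 0:
        return f"{days}d {hours}h {minutes}m"
    if hours > 0:
        return f"{hours}h {minutes}m"
    return f"{minutes}m"

assert format_time_until(90061) == "1d 1h 1m"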
diff --git a/fastanime/cli/interactive/menu/media/provider_search.py b/fastanime/cli/interactive/menu/media/provider_search.py
index 9d65369..8804e3d 100644
--- a/fastanime/cli/interactive/menu/media/provider_search.py
+++ b/fastanime/cli/interactive/menu/media/provider_search.py
@@ -30,9 +30,12 @@ def provider_search(ctx: Context, state: State) -> State | InternalDirective:
return InternalDirective.BACK
provider_search_results = provider.search(
- SearchParams(query=normalize_title(media_title, config.general.provider.value,True), translation_type=config.stream.translation_type)
+ SearchParams(
+ query=normalize_title(media_title, config.general.provider.value, True),
+ translation_type=config.stream.translation_type,
+ )
)
-
+
if not provider_search_results or not provider_search_results.results:
feedback.warning(
f"Could not find '{media_title}' on {provider.__class__.__name__}",
@@ -51,7 +54,10 @@ def provider_search(ctx: Context, state: State) -> State | InternalDirective:
# Use fuzzy matching to find the best title
best_match_title = max(
provider_results_map.keys(),
- key=lambda p_title: fuzz.ratio(normalize_title(p_title,config.general.provider.value).lower(), media_title.lower()),
+ key=lambda p_title: fuzz.ratio(
+ normalize_title(p_title, config.general.provider.value).lower(),
+ media_title.lower(),
+ ),
)
feedback.info("Auto-selecting best match: {best_match_title}")
selected_provider_anime = provider_results_map[best_match_title]
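
The auto-selection above is a plain max() over candidate titles, keyed by fuzz.ratio against the query. Reduced to its essentials (assumes thefuzz is installed; the real code also runs each title through normalize_title first):

from thefuzz import fuzz

def best_match(candidates: list[str], query: str) -> str:
    # Highest similarity score wins; ties resolve to the first candidate seen.
    return max(candidates, key=lambda title: fuzz.ratio(title.lower(), query.lower()))

print(best_match(["One Piece", "One Punch Man", "One Piece Film: Red"], "one piece"))
# -> One Piece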
diff --git a/fastanime/cli/interactive/menu/media/servers.py b/fastanime/cli/interactive/menu/media/servers.py
index 379c329..27547ab 100644
--- a/fastanime/cli/interactive/menu/media/servers.py
+++ b/fastanime/cli/interactive/menu/media/servers.py
@@ -73,7 +73,11 @@ def servers(ctx: Context, state: State) -> State | InternalDirective:
)
return InternalDirective.RELOAD
- final_title = (media_item.streaming_episodes[episode_number].title if media_item.streaming_episodes.get(episode_number) else f"{media_item.title.english} - Ep {episode_number}")
+ final_title = (
+ media_item.streaming_episodes[episode_number].title
+ if media_item.streaming_episodes.get(episode_number)
+ else f"{media_item.title.english} - Ep {episode_number}"
+ )
feedback.info(f"[bold green]Launching player for:[/] {final_title}")
player_result = ctx.player.play(
diff --git a/fastanime/cli/service/download/service.py b/fastanime/cli/service/download/service.py
index 13c5109..b208620 100644
--- a/fastanime/cli/service/download/service.py
+++ b/fastanime/cli/service/download/service.py
@@ -51,30 +51,34 @@ class DownloadService:
) -> bool:
"""
Download a specific episode and record it in the registry.
-
+
Args:
media_item: The media item to download
episode_number: The episode number to download
server: Optional specific server to use for download
quality: Optional quality preference
force_redownload: Whether to redownload if already exists
-
+
Returns:
bool: True if download was successful, False otherwise
"""
try:
# Get or create media record
media_record = self.media_registry.get_or_create_record(media_item)
-
+
# Check if episode already exists and is completed
- existing_episode = self._find_episode_in_record(media_record, episode_number)
+ existing_episode = self._find_episode_in_record(
+ media_record, episode_number
+ )
if (
- existing_episode
+ existing_episode
and existing_episode.download_status == DownloadStatus.COMPLETED
and not force_redownload
and existing_episode.file_path.exists()
):
- logger.info(f"Episode {episode_number} already downloaded at {existing_episode.file_path}")
+ logger.info(
+ f"Episode {episode_number} already downloaded at {existing_episode.file_path}"
+ )
return True
# Generate file path
@@ -130,8 +134,10 @@ class DownloadService:
file_size=file_size,
subtitle_paths=download_result.subtitle_paths,
)
-
- logger.info(f"Successfully downloaded episode {episode_number} to {download_result.video_path}")
+
+ logger.info(
+ f"Successfully downloaded episode {episode_number} to {download_result.video_path}"
+ )
else:
# Update episode record with failure
self.media_registry.update_episode_download_status(
@@ -140,8 +146,10 @@ class DownloadService:
status=DownloadStatus.FAILED,
error_message=download_result.error_message,
)
-
- logger.error(f"Failed to download episode {episode_number}: {download_result.error_message}")
+
+ logger.error(
+ f"Failed to download episode {episode_number}: {download_result.error_message}"
+ )
return download_result.success
@@ -157,7 +165,7 @@ class DownloadService:
)
except Exception as cleanup_error:
logger.error(f"Failed to update failed status: {cleanup_error}")
-
+
return False
def download_multiple_episodes(
@@ -169,18 +177,18 @@ class DownloadService:
) -> dict[str, bool]:
"""
Download multiple episodes and return success status for each.
-
+
Args:
media_item: The media item to download
episode_numbers: List of episode numbers to download
quality: Optional quality preference
force_redownload: Whether to redownload if already exists
-
+
Returns:
dict: Mapping of episode_number -> success status
"""
results = {}
-
+
for episode_number in episode_numbers:
success = self.download_episode(
media_item=media_item,
@@ -189,18 +197,22 @@ class DownloadService:
force_redownload=force_redownload,
)
results[episode_number] = success
-
+
# Log progress
- logger.info(f"Download progress: {episode_number} - {'โ' if success else 'โ'}")
-
+ logger.info(
+ f"Download progress: {episode_number} - {'โ' if success else 'โ'}"
+ )
+
return results
- def get_download_status(self, media_item: MediaItem, episode_number: str) -> Optional[DownloadStatus]:
+ def get_download_status(
+ self, media_item: MediaItem, episode_number: str
+ ) -> Optional[DownloadStatus]:
"""Get the download status for a specific episode."""
media_record = self.media_registry.get_media_record(media_item.id)
if not media_record:
return None
-
+
episode_record = self._find_episode_in_record(media_record, episode_number)
return episode_record.download_status if episode_record else None
@@ -209,7 +221,7 @@ class DownloadService:
media_record = self.media_registry.get_media_record(media_item.id)
if not media_record:
return []
-
+
return [
episode.episode_number
for episode in media_record.media_episodes
@@ -217,38 +229,43 @@ class DownloadService:
and episode.file_path.exists()
]
- def remove_downloaded_episode(self, media_item: MediaItem, episode_number: str) -> bool:
+ def remove_downloaded_episode(
+ self, media_item: MediaItem, episode_number: str
+ ) -> bool:
"""Remove a downloaded episode file and update registry."""
try:
media_record = self.media_registry.get_media_record(media_item.id)
if not media_record:
return False
-
+
episode_record = self._find_episode_in_record(media_record, episode_number)
if not episode_record:
return False
-
+
# Remove file if it exists
if episode_record.file_path.exists():
episode_record.file_path.unlink()
-
+
# Remove episode from record
media_record.media_episodes = [
- ep for ep in media_record.media_episodes
+ ep
+ for ep in media_record.media_episodes
if ep.episode_number != episode_number
]
-
+
# Save updated record
self.media_registry.save_media_record(media_record)
-
+
logger.info(f"Removed downloaded episode {episode_number}")
return True
-
+
except Exception as e:
logger.error(f"Error removing episode {episode_number}: {e}")
return False
- def _find_episode_in_record(self, media_record, episode_number: str) -> Optional[MediaEpisode]:
+ def _find_episode_in_record(
+ self, media_record, episode_number: str
+ ) -> Optional[MediaEpisode]:
"""Find an episode record by episode number."""
for episode in media_record.media_episodes:
if episode.episode_number == episode_number:
@@ -303,7 +320,7 @@ class DownloadService:
"""Download episode from a specific server."""
anime_title = media_item.title.english or media_item.title.romaji or "Unknown"
episode_title = server.episode_title or f"Episode {episode_number}"
-
+
try:
# Get the best quality link from server
if not server.links:
@@ -319,7 +336,9 @@ class DownloadService:
episode_title=episode_title,
silent=True, # Use True by default since there's no verbose in config
headers=server.headers,
- subtitles=[sub.url for sub in server.subtitles] if server.subtitles else [],
+ subtitles=[sub.url for sub in server.subtitles]
+ if server.subtitles
+ else [],
vid_format=self.downloads_config.preferred_quality,
force_unknown_ext=True,
)
@@ -333,6 +352,7 @@ class DownloadService:
except Exception as e:
logger.error(f"Error during download: {e}")
from ....core.downloader.model import DownloadResult
+
return DownloadResult(
success=False,
error_message=str(e),
@@ -346,28 +366,34 @@ class DownloadService:
def get_failed_downloads(self) -> list[tuple[int, str]]:
"""Get all episodes that failed to download."""
- return self.media_registry.get_episodes_by_download_status(DownloadStatus.FAILED)
+ return self.media_registry.get_episodes_by_download_status(
+ DownloadStatus.FAILED
+ )
def get_queued_downloads(self) -> list[tuple[int, str]]:
"""Get all episodes queued for download."""
- return self.media_registry.get_episodes_by_download_status(DownloadStatus.QUEUED)
+ return self.media_registry.get_episodes_by_download_status(
+ DownloadStatus.QUEUED
+ )
def retry_failed_downloads(self, max_retries: int = 3) -> dict[str, bool]:
"""Retry all failed downloads up to max_retries."""
failed_episodes = self.get_failed_downloads()
results = {}
-
+
for media_id, episode_number in failed_episodes:
# Get the media record to check retry attempts
media_record = self.media_registry.get_media_record(media_id)
if not media_record:
continue
-
+
episode_record = self._find_episode_in_record(media_record, episode_number)
if not episode_record or episode_record.download_attempts >= max_retries:
- logger.info(f"Skipping {media_id}:{episode_number} - max retries exceeded")
+ logger.info(
+ f"Skipping {media_id}:{episode_number} - max retries exceeded"
+ )
continue
-
+
logger.info(f"Retrying download for {media_id}:{episode_number}")
success = self.download_episode(
media_item=media_record.media_item,
@@ -375,40 +401,41 @@ class DownloadService:
force_redownload=True,
)
results[f"{media_id}:{episode_number}"] = success
-
+
return results
def cleanup_failed_downloads(self, older_than_days: int = 7) -> int:
"""Clean up failed download records older than specified days."""
from datetime import datetime, timedelta
-
+
cleanup_count = 0
cutoff_date = datetime.now() - timedelta(days=older_than_days)
-
+
try:
for record in self.media_registry.get_all_media_records():
episodes_to_remove = []
-
+
for episode in record.media_episodes:
if (
episode.download_status == DownloadStatus.FAILED
and episode.download_date < cutoff_date
):
episodes_to_remove.append(episode.episode_number)
-
+
for episode_number in episodes_to_remove:
record.media_episodes = [
- ep for ep in record.media_episodes
+ ep
+ for ep in record.media_episodes
if ep.episode_number != episode_number
]
cleanup_count += 1
-
+
if episodes_to_remove:
self.media_registry.save_media_record(record)
-
+
logger.info(f"Cleaned up {cleanup_count} failed download records")
return cleanup_count
-
+
except Exception as e:
logger.error(f"Error during cleanup: {e}")
return 0
@@ -438,10 +465,23 @@ class DownloadService:
try:
media_record = self.media_registry.get_media_record(media_item.id)
if not media_record:
- return {"total": 0, "downloaded": 0, "failed": 0, "queued": 0, "downloading": 0}
-
- stats = {"total": 0, "downloaded": 0, "failed": 0, "queued": 0, "downloading": 0, "paused": 0}
-
+ return {
+ "total": 0,
+ "downloaded": 0,
+ "failed": 0,
+ "queued": 0,
+ "downloading": 0,
+ }
+
+ stats = {
+ "total": 0,
+ "downloaded": 0,
+ "failed": 0,
+ "queued": 0,
+ "downloading": 0,
+ "paused": 0,
+ }
+
for episode in media_record.media_episodes:
stats["total"] += 1
status = episode.download_status.value.lower()
@@ -455,26 +495,36 @@ class DownloadService:
stats["downloading"] += 1
elif status == "paused":
stats["paused"] += 1
-
+
return stats
-
+
except Exception as e:
logger.error(f"Error getting download progress: {e}")
- return {"total": 0, "downloaded": 0, "failed": 0, "queued": 0, "downloading": 0}
+ return {
+ "total": 0,
+ "downloaded": 0,
+ "failed": 0,
+ "queued": 0,
+ "downloading": 0,
+ }
- def _generate_episode_file_path(self, media_item: MediaItem, episode_number: str) -> Path:
+ def _generate_episode_file_path(
+ self, media_item: MediaItem, episode_number: str
+ ) -> Path:
"""Generate the file path for a downloaded episode."""
# Use the download directory from config
base_dir = self.downloads_config.downloads_dir
-
+
# Create anime-specific directory
anime_title = media_item.title.english or media_item.title.romaji or "Unknown"
# Sanitize title for filesystem
- safe_title = "".join(c for c in anime_title if c.isalnum() or c in (' ', '-', '_')).rstrip()
-
+ safe_title = "".join(
+ c for c in anime_title if c.isalnum() or c in (" ", "-", "_")
+ ).rstrip()
+
anime_dir = base_dir / safe_title
-
+
# Generate filename (could use template from config in the future)
filename = f"Episode_{episode_number:0>2}.mp4"
-
+
return anime_dir / filename
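
The reformatted _generate_episode_file_path keeps three steps: whitelist-sanitize the title, nest one directory per anime, and zero-pad the episode number. Condensed into a self-contained sketch (paths and titles are illustrative):

from pathlib import Path

def episode_path(base_dir: Path, anime_title: str, episode_number: str) -> Path:
    # Keep only alphanumerics, spaces, hyphens and underscores, as in the hunk above.
    safe_title = "".join(
        c for c in anime_title if c.isalnum() or c in (" ", "-", "_")
    ).rstrip()
    # "{:0>2}" pads to width 2 with zeros: "5" -> "05", "12" -> "12".
    return base_dir / safe_title / f"Episode_{episode_number:0>2}.mp4"

print(episode_path(Path("/downloads"), "Frieren: Beyond Journey's End", "5"))
# -> /downloads/Frieren Beyond Journeys End/Episode_05.mp4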
diff --git a/fastanime/cli/utils/__init__.py b/fastanime/cli/utils/__init__.py
index a4bb9b7..535f469 100644
--- a/fastanime/cli/utils/__init__.py
+++ b/fastanime/cli/utils/__init__.py
@@ -2,4 +2,4 @@
from .parser import parse_episode_range
-__all__ = ["parse_episode_range"]
\ No newline at end of file
+__all__ = ["parse_episode_range"]
diff --git a/fastanime/cli/utils/lazyloader.py b/fastanime/cli/utils/lazyloader.py
index bf35c49..3cd96dc 100644
--- a/fastanime/cli/utils/lazyloader.py
+++ b/fastanime/cli/utils/lazyloader.py
@@ -2,7 +2,7 @@ import importlib
import click
-
+# TODO: Since the command structure is predictable, default to requiring only a mapping of command names to their function names (some commands, like "import", need special names).
class LazyGroup(click.Group):
def __init__(self, root: str, *args, lazy_subcommands=None, **kwargs):
super().__init__(*args, **kwargs)
diff --git a/fastanime/cli/utils/parser.py b/fastanime/cli/utils/parser.py
index 5f0d628..8256316 100644
--- a/fastanime/cli/utils/parser.py
+++ b/fastanime/cli/utils/parser.py
@@ -4,12 +4,11 @@ from typing import Iterator
def parse_episode_range(
- episode_range_str: str | None,
- available_episodes: list[str]
+ episode_range_str: str | None, available_episodes: list[str]
) -> Iterator[str]:
"""
Parse an episode range string and return an iterator of episode numbers.
-
+
This function handles various episode range formats:
- Single episode: "5" -> episodes from index 5 onwards
- Range with start and end: "5:10" -> episodes from index 5 to 10 (exclusive)
@@ -17,18 +16,18 @@ def parse_episode_range(
- Start only: "5:" -> episodes from index 5 onwards
- End only: ":10" -> episodes from beginning to index 10
- All episodes: ":" -> all episodes
-
+
Args:
episode_range_str: The episode range string to parse (e.g., "5:10", "5:", ":10", "5")
available_episodes: List of available episode numbers/identifiers
-
+
Returns:
Iterator over the selected episode numbers
-
+
Raises:
ValueError: If the episode range format is invalid
IndexError: If the specified indices are out of range
-
+
Examples:
>>> episodes = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
>>> list(parse_episode_range("2:5", episodes))
@@ -43,14 +42,14 @@ def parse_episode_range(
if not episode_range_str:
# No range specified, return all episodes
return iter(available_episodes)
-
+
# Sort episodes numerically for consistent ordering
episodes = sorted(available_episodes, key=float)
-
+
if ":" in episode_range_str:
# Handle colon-separated ranges
parts = episode_range_str.split(":")
-
+
if len(parts) == 3:
# Format: start:end:step
start_str, end_str, step_str = parts
@@ -59,15 +58,15 @@ def parse_episode_range(
f"Invalid episode range format: '{episode_range_str}'. "
"When using 3 parts (start:end:step), all parts must be non-empty."
)
-
+
try:
start_idx = int(start_str)
end_idx = int(end_str)
step = int(step_str)
-
+
if step <= 0:
raise ValueError("Step value must be positive")
-
+
return iter(episodes[start_idx:end_idx:step])
except ValueError as e:
if "invalid literal" in str(e):
@@ -76,11 +75,11 @@ def parse_episode_range(
"All parts must be valid integers."
) from e
raise
-
+
elif len(parts) == 2:
# Format: start:end or start: or :end
start_str, end_str = parts
-
+
if start_str and end_str:
# Both start and end specified: start:end
try:
@@ -92,7 +91,7 @@ def parse_episode_range(
f"Invalid episode range format: '{episode_range_str}'. "
"Start and end must be valid integers."
) from e
-
+
elif start_str and not end_str:
# Only start specified: start:
try:
@@ -103,7 +102,7 @@ def parse_episode_range(
f"Invalid episode range format: '{episode_range_str}'. "
"Start must be a valid integer."
) from e
-
+
elif not start_str and end_str:
# Only end specified: :end
try:
@@ -114,7 +113,7 @@ def parse_episode_range(
f"Invalid episode range format: '{episode_range_str}'. "
"End must be a valid integer."
) from e
-
+
else:
# Both empty: ":"
return iter(episodes)
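
Because the colon forms map directly onto Python slicing over the sorted episode list, a few concrete calls pin down the documented semantics (assuming from fastanime.cli.utils import parse_episode_range, and the same episodes list as the docstring):

episodes = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]

list(parse_episode_range("2:5", episodes))     # ["3", "4", "5"], i.e. episodes[2:5]
list(parse_episode_range("0:10:2", episodes))  # ["1", "3", "5", "7", "9"], i.e. episodes[0:10:2]
list(parse_episode_range(":3", episodes))      # ["1", "2", "3"], i.e. episodes[:3]
list(parse_episode_range(None, episodes))      # all ten episodes, unchanged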
diff --git a/fastanime/cli/utils/update.py b/fastanime/cli/utils/update.py
index 06dcf92..b1111bd 100644
--- a/fastanime/cli/utils/update.py
+++ b/fastanime/cli/utils/update.py
@@ -9,7 +9,7 @@ import sys
from httpx import get
from rich import print
-from ...core.constants import AUTHOR, GIT_REPO, PROJECT_NAME_LOWER, __version__
+from ...core.constants import AUTHOR, GIT_REPO, PROJECT_NAME_LOWER, __version__
API_URL = f"https://api.{GIT_REPO}/repos/{AUTHOR}/{PROJECT_NAME_LOWER}/releases/latest"
@@ -98,7 +98,7 @@ def update_app(force=False):
process = subprocess.run(
[NIX, "profile", "upgrade", PROJECT_NAME_LOWER], check=False
)
- elif is_git_repo(AUTHOR, PROJECT_NAME_LOWER) :
+ elif is_git_repo(AUTHOR, PROJECT_NAME_LOWER):
GIT_EXECUTABLE = shutil.which("git")
args = [
GIT_EXECUTABLE,
@@ -117,7 +117,9 @@ def update_app(force=False):
)
elif UV := shutil.which("uv"):
- process = subprocess.run([UV, "tool", "upgrade", PROJECT_NAME_LOWER], check=False)
+ process = subprocess.run(
+ [UV, "tool", "upgrade", PROJECT_NAME_LOWER], check=False
+ )
elif PIPX := shutil.which("pipx"):
process = subprocess.run([PIPX, "upgrade", PROJECT_NAME_LOWER], check=False)
else:
diff --git a/fastanime/core/downloader/default.py b/fastanime/core/downloader/default.py
index d8f62a7..0f9999f 100644
--- a/fastanime/core/downloader/default.py
+++ b/fastanime/core/downloader/default.py
@@ -34,14 +34,14 @@ logger = logging.getLogger(__name__)
class DefaultDownloader(BaseDownloader):
"""Default downloader that uses httpx for downloads without yt-dlp dependency."""
-
+
def download(self, params: DownloadParams) -> DownloadResult:
"""Download video and optionally subtitles, returning detailed results."""
try:
video_path = None
sub_paths = []
merged_path = None
-
+
if TORRENT_REGEX.match(params.url):
from .torrents import download_torrent_with_webtorrent_cli
@@ -51,24 +51,26 @@ class DefaultDownloader(BaseDownloader):
dest_dir.mkdir(parents=True, exist_ok=True)
video_path = dest_dir / episode_title
- video_path = download_torrent_with_webtorrent_cli(video_path, params.url)
+ video_path = download_torrent_with_webtorrent_cli(
+ video_path, params.url
+ )
else:
video_path = self._download_video(params)
-
+
if params.subtitles:
sub_paths = self._download_subs(params)
if params.merge:
merged_path = self._merge_subtitles(params, video_path, sub_paths)
-
+
return DownloadResult(
success=True,
video_path=video_path,
subtitle_paths=sub_paths,
merged_path=merged_path,
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
-
+
except KeyboardInterrupt:
print()
print("Aborted!")
@@ -76,7 +78,7 @@ class DefaultDownloader(BaseDownloader):
success=False,
error_message="Download aborted by user",
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
except Exception as e:
logger.error(f"Download failed: {e}")
@@ -84,43 +86,41 @@ class DefaultDownloader(BaseDownloader):
success=False,
error_message=str(e),
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
def _download_video(self, params: DownloadParams) -> Path:
"""Download video using httpx with progress tracking."""
anime_title = sanitize_filename(params.anime_title)
episode_title = sanitize_filename(params.episode_title)
-
+
dest_dir = self.config.downloads_dir / anime_title
dest_dir.mkdir(parents=True, exist_ok=True)
-
+
# Get file extension from URL or headers
file_extension = self._get_file_extension(params.url, params.headers)
if params.force_unknown_ext and not file_extension:
file_extension = ".unknown_video"
elif not file_extension:
file_extension = ".mp4" # default fallback
-
+
video_path = dest_dir / f"{episode_title}{file_extension}"
-
+
# Check if file already exists
if video_path.exists() and not params.prompt:
logger.info(f"File already exists: {video_path}")
return video_path
elif video_path.exists() and params.prompt:
- if not Confirm.ask(f"File exists: {video_path.name}. Overwrite?", default=False):
+ if not Confirm.ask(
+ f"File exists: {video_path.name}. Overwrite?", default=False
+ ):
return video_path
-
+
# Download with progress tracking
self._download_with_progress(
- params.url,
- video_path,
- params.headers,
- params.silent,
- params.progress_hooks
+ params.url, video_path, params.headers, params.silent, params.progress_hooks
)
-
+
# Handle unknown video extension normalization
if video_path.suffix == ".unknown_video":
normalized_path = video_path.with_suffix(".mp4")
@@ -128,7 +128,7 @@ class DefaultDownloader(BaseDownloader):
shutil.move(video_path, normalized_path)
print("Successfully normalized path")
return normalized_path
-
+
return video_path
def _get_file_extension(self, url: str, headers: dict) -> str:
@@ -136,55 +136,64 @@ class DefaultDownloader(BaseDownloader):
# First try to get from URL
parsed_url = urllib.parse.urlparse(url)
path = parsed_url.path
- if path and '.' in path:
+ if path and "." in path:
return Path(path).suffix
-
+
# Try to get from response headers
try:
- with self.client.stream('HEAD', url, headers=headers) as response:
- content_type = response.headers.get('content-type', '')
- if 'video/mp4' in content_type:
- return '.mp4'
- elif 'video/webm' in content_type:
- return '.webm'
- elif 'video/x-matroska' in content_type:
- return '.mkv'
- elif 'video/x-msvideo' in content_type:
- return '.avi'
- elif 'video/quicktime' in content_type:
- return '.mov'
-
+ with self.client.stream("HEAD", url, headers=headers) as response:
+ content_type = response.headers.get("content-type", "")
+ if "video/mp4" in content_type:
+ return ".mp4"
+ elif "video/webm" in content_type:
+ return ".webm"
+ elif "video/x-matroska" in content_type:
+ return ".mkv"
+ elif "video/x-msvideo" in content_type:
+ return ".avi"
+ elif "video/quicktime" in content_type:
+ return ".mov"
+
# Try content-disposition header
- content_disposition = response.headers.get('content-disposition', '')
- if 'filename=' in content_disposition:
- filename = content_disposition.split('filename=')[1].strip('"\'')
+ content_disposition = response.headers.get("content-disposition", "")
+ if "filename=" in content_disposition:
+ filename = content_disposition.split("filename=")[1].strip("\"'")
return Path(filename).suffix
except Exception:
pass
-
+
return ""
- def _download_with_progress(self, url: str, output_path: Path, headers: dict, silent: bool, progress_hooks: list | None = None):
+ def _download_with_progress(
+ self,
+ url: str,
+ output_path: Path,
+ headers: dict,
+ silent: bool,
+ progress_hooks: list | None = None,
+ ):
"""Download file with rich progress bar and progress hooks."""
progress_hooks = progress_hooks or []
-
+
# Always show download start message
print(f"[cyan]Starting download of {output_path.name}...[/]")
-
+
try:
- with self.client.stream('GET', url, headers=headers) as response:
+ with self.client.stream("GET", url, headers=headers) as response:
response.raise_for_status()
-
- total_size = int(response.headers.get('content-length', 0))
+
+ total_size = int(response.headers.get("content-length", 0))
downloaded = 0
-
+
# Initialize progress display - always show progress
progress = None
task_id = None
-
+
if total_size > 0:
progress = Progress(
- TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
+ TextColumn(
+ "[bold blue]{task.fields[filename]}", justify="right"
+ ),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"โข",
@@ -197,75 +206,77 @@ class DefaultDownloader(BaseDownloader):
else:
# Progress without total size (indeterminate)
progress = Progress(
- TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
+ TextColumn(
+ "[bold blue]{task.fields[filename]}", justify="right"
+ ),
TextColumn("[green]{task.completed} bytes"),
"โข",
TransferSpeedColumn(),
)
-
+
progress.start()
task_id = progress.add_task(
- "download",
- filename=output_path.name,
- total=total_size if total_size > 0 else None
+ "download",
+ filename=output_path.name,
+ total=total_size if total_size > 0 else None,
)
-
+
try:
- with open(output_path, 'wb') as f:
+ with open(output_path, "wb") as f:
for chunk in response.iter_bytes(chunk_size=8192):
if chunk:
f.write(chunk)
chunk_size = len(chunk)
downloaded += chunk_size
-
+
# Always update progress bar
if progress is not None and task_id is not None:
progress.update(task_id, advance=chunk_size)
-
+
# Call progress hooks
if progress_hooks:
progress_info = {
- 'downloaded_bytes': downloaded,
- 'total_bytes': total_size,
- 'filename': output_path.name,
- 'status': 'downloading'
+ "downloaded_bytes": downloaded,
+ "total_bytes": total_size,
+ "filename": output_path.name,
+ "status": "downloading",
}
for hook in progress_hooks:
try:
hook(progress_info)
except Exception as e:
logger.warning(f"Progress hook failed: {e}")
-
+
finally:
if progress:
progress.stop()
-
+
# Always show completion message
print(f"[green]โ Download completed: {output_path.name}[/]")
-
+
# Call completion hooks
if progress_hooks:
completion_info = {
- 'downloaded_bytes': downloaded,
- 'total_bytes': total_size or downloaded,
- 'filename': output_path.name,
- 'status': 'finished'
+ "downloaded_bytes": downloaded,
+ "total_bytes": total_size or downloaded,
+ "filename": output_path.name,
+ "status": "finished",
}
for hook in progress_hooks:
try:
hook(completion_info)
except Exception as e:
logger.warning(f"Progress hook failed: {e}")
-
+
except httpx.HTTPError as e:
# Call error hooks
if progress_hooks:
error_info = {
- 'downloaded_bytes': 0,
- 'total_bytes': 0,
- 'filename': output_path.name,
- 'status': 'error',
- 'error': str(e)
+ "downloaded_bytes": 0,
+ "total_bytes": 0,
+ "filename": output_path.name,
+ "status": "error",
+ "error": str(e),
}
for hook in progress_hooks:
try:
@@ -280,12 +291,12 @@ class DefaultDownloader(BaseDownloader):
episode_title = sanitize_filename(params.episode_title)
base = self.config.downloads_dir / anime_title
downloaded_subs = []
-
+
for i, sub_url in enumerate(params.subtitles):
try:
response = self.client.get(sub_url, headers=params.headers)
response.raise_for_status()
-
+
# Determine filename
filename = get_remote_filename(response)
if not filename:
@@ -293,79 +304,87 @@ class DefaultDownloader(BaseDownloader):
filename = f"{episode_title}.srt"
else:
filename = f"{episode_title}.{i}.srt"
-
+
sub_path = base / filename
-
+
# Write subtitle content
- with open(sub_path, 'w', encoding='utf-8') as f:
+ with open(sub_path, "w", encoding="utf-8") as f:
f.write(response.text)
-
+
downloaded_subs.append(sub_path)
-
+
print(f"Downloaded subtitle: {filename}")
-
+
except httpx.HTTPError as e:
logger.error(f"Failed to download subtitle {i}: {e}")
print(f"[red]Failed to download subtitle {i}: {e}[/red]")
-
+
return downloaded_subs
- def _merge_subtitles(self, params: DownloadParams, video_path: Path, sub_paths: list[Path]) -> Optional[Path]:
+ def _merge_subtitles(
+ self, params: DownloadParams, video_path: Path, sub_paths: list[Path]
+ ) -> Optional[Path]:
"""Merge subtitles with video using ffmpeg and return the path to the merged file."""
ffmpeg_executable = shutil.which("ffmpeg")
if not ffmpeg_executable:
raise FastAnimeError("Please install ffmpeg in order to merge subtitles")
-
+
merged_filename = video_path.stem + ".mkv"
-
+
# Prepare subtitle input arguments
subs_input_args = list(
itertools.chain.from_iterable(
[["-i", str(sub_path)] for sub_path in sub_paths]
)
)
-
+
with tempfile.TemporaryDirectory() as temp_dir_str:
temp_dir = Path(temp_dir_str)
temp_output_path = temp_dir / merged_filename
-
+
# Construct ffmpeg command
args = [
ffmpeg_executable,
"-hide_banner",
- "-i", str(video_path), # Main video input
+ "-i",
+ str(video_path), # Main video input
*subs_input_args, # All subtitle inputs
- "-c", "copy", # Copy streams without re-encoding
- "-map", "0:v", # Map video from first input
- "-map", "0:a", # Map audio from first input
+ "-c",
+ "copy", # Copy streams without re-encoding
+ "-map",
+ "0:v", # Map video from first input
+ "-map",
+ "0:a", # Map audio from first input
]
-
+
# Map subtitle streams from each subtitle input
for i in range(len(sub_paths)):
args.extend(["-map", f"{i + 1}:s"])
-
+
args.append(str(temp_output_path))
-
+
print(f"[cyan]Starting subtitle merge for {video_path.name}...[/]")
-
+
try:
# Run ffmpeg - use silent flag to control ffmpeg output, not progress
process = subprocess.run(
- args,
+ args,
capture_output=params.silent, # Only suppress ffmpeg output if silent
text=True,
- check=True
+ check=True,
)
-
+
final_output_path = video_path.parent / merged_filename
-
+
# Handle existing file
if final_output_path.exists():
if not params.prompt or Confirm.ask(
f"File exists ({final_output_path}). Overwrite?",
default=True,
):
- print(f"[yellow]Overwriting existing file: {final_output_path}[/]")
+ print(
+ f"[yellow]Overwriting existing file: {final_output_path}[/]"
+ )
final_output_path.unlink()
shutil.move(str(temp_output_path), str(final_output_path))
else:
@@ -373,18 +392,20 @@ class DefaultDownloader(BaseDownloader):
return None
else:
shutil.move(str(temp_output_path), str(final_output_path))
-
+
# Clean up original files if requested
if params.clean:
print("[cyan]Cleaning original files...[/]")
video_path.unlink()
for sub_path in sub_paths:
sub_path.unlink()
-
- print(f"[green bold]Subtitles merged successfully.[/] Output: {final_output_path}")
-
+
+ print(
+ f"[green bold]Subtitles merged successfully.[/] Output: {final_output_path}"
+ )
+
return final_output_path
-
+
except subprocess.CalledProcessError as e:
error_msg = f"FFmpeg failed: {e.stderr if e.stderr else str(e)}"
logger.error(error_msg)
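
The hook protocol threaded through _download_with_progress is minimal: each hook is called with a plain dict. A sketch of a consumer, using only the keys the hunks above actually emit (log_progress is an illustrative name):

def log_progress(info: dict) -> None:
    # Emitted keys: downloaded_bytes, total_bytes, filename, status ("downloading", "finished", "error").
    if info["status"] == "downloading" and info["total_bytes"]:
        pct = info["downloaded_bytes"] / info["total_bytes"] * 100
        print(f"{info['filename']}: {pct:.1f}%")
    elif info["status"] == "finished":
        print(f"{info['filename']}: done ({info['downloaded_bytes']} bytes)")
    elif info["status"] == "error":
        print(f"{info['filename']}: failed - {info.get('error')}")

# Wired up via the downloader's progress_hooks parameter, e.g. progress_hooks=[log_progress].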
diff --git a/fastanime/core/downloader/downloader.py b/fastanime/core/downloader/downloader.py
index ed58bb5..ca397a9 100644
--- a/fastanime/core/downloader/downloader.py
+++ b/fastanime/core/downloader/downloader.py
@@ -30,9 +30,11 @@ class DownloadFactory:
try:
import yt_dlp
from .yt_dlp import YtDLPDownloader
+
return YtDLPDownloader(config)
except ImportError:
from .default import DefaultDownloader
+
return DefaultDownloader(config)
else:
raise FastAnimeError("Downloader not implemented")
diff --git a/fastanime/core/downloader/model.py b/fastanime/core/downloader/model.py
index 5f02646..3c7a07b 100644
--- a/fastanime/core/downloader/model.py
+++ b/fastanime/core/downloader/model.py
@@ -8,25 +8,22 @@ from pydantic import BaseModel, Field
class DownloadResult(BaseModel):
"""Result of a download operation."""
-
+
success: bool = Field(description="Whether the download was successful")
video_path: Optional[Path] = Field(
- default=None,
- description="Path to the downloaded video file"
+ default=None, description="Path to the downloaded video file"
)
subtitle_paths: list[Path] = Field(
- default_factory=list,
- description="Paths to downloaded subtitle files"
+ default_factory=list, description="Paths to downloaded subtitle files"
)
merged_path: Optional[Path] = Field(
default=None,
- description="Path to the merged video+subtitles file if merge was performed"
+ description="Path to the merged video+subtitles file if merge was performed",
)
error_message: Optional[str] = Field(
- default=None,
- description="Error message if download failed"
+ default=None, description="Error message if download failed"
)
anime_title: str = Field(description="Title of the anime")
episode_title: str = Field(description="Title of the episode")
-
+
model_config = {"arbitrary_types_allowed": True}
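
Since DownloadResult travels back through both downloaders, successes and failures share one shape; a quick construction sketch using only the fields defined above (values illustrative):

from pathlib import Path

ok = DownloadResult(
    success=True,
    video_path=Path("/downloads/Example/Episode_01.mp4"),
    anime_title="Example",
    episode_title="Episode 1",
)
failed = DownloadResult(
    success=False,
    error_message="HTTP 403 from CDN",
    anime_title="Example",
    episode_title="Episode 1",
)
assert ok.subtitle_paths == [] and ok.merged_path is None
assert failed.video_path is None and not failed.success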
diff --git a/fastanime/core/downloader/torrents.py b/fastanime/core/downloader/torrents.py
index 735b378..e13c133 100644
--- a/fastanime/core/downloader/torrents.py
+++ b/fastanime/core/downloader/torrents.py
@@ -11,6 +11,7 @@ from ..exceptions import FastAnimeError, DependencyNotFoundError
try:
import libtorrent as lt
+
LIBTORRENT_AVAILABLE = True
except ImportError:
LIBTORRENT_AVAILABLE = False
@@ -21,6 +22,7 @@ logger = logging.getLogger(__name__)
class TorrentDownloadError(FastAnimeError):
"""Raised when torrent download fails."""
+
pass
@@ -37,7 +39,7 @@ class TorrentDownloader:
max_download_rate: int = -1, # -1 means unlimited
max_connections: int = 200,
listen_port: int = 6881,
- progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None
+ progress_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
):
"""
Initialize the torrent downloader.
@@ -65,33 +67,33 @@ class TorrentDownloader:
raise DependencyNotFoundError("libtorrent is not available")
session = lt.session() # type: ignore
-
+
# Configure session settings
settings = {
- 'user_agent': 'FastAnime/1.0',
- 'listen_interfaces': f'0.0.0.0:{self.listen_port}',
- 'enable_outgoing_utp': True,
- 'enable_incoming_utp': True,
- 'enable_outgoing_tcp': True,
- 'enable_incoming_tcp': True,
- 'connections_limit': self.max_connections,
- 'dht_bootstrap_nodes': 'dht.transmissionbt.com:6881',
+ "user_agent": "FastAnime/1.0",
+ "listen_interfaces": f"0.0.0.0:{self.listen_port}",
+ "enable_outgoing_utp": True,
+ "enable_incoming_utp": True,
+ "enable_outgoing_tcp": True,
+ "enable_incoming_tcp": True,
+ "connections_limit": self.max_connections,
+ "dht_bootstrap_nodes": "dht.transmissionbt.com:6881",
}
-
+
if self.max_upload_rate > 0:
- settings['upload_rate_limit'] = self.max_upload_rate * 1024
+ settings["upload_rate_limit"] = self.max_upload_rate * 1024
if self.max_download_rate > 0:
- settings['download_rate_limit'] = self.max_download_rate * 1024
+ settings["download_rate_limit"] = self.max_download_rate * 1024
session.apply_settings(settings)
-
+
# Start DHT
session.start_dht()
-
+
# Add trackers
- session.add_dht_router('router.bittorrent.com', 6881)
- session.add_dht_router('router.utorrent.com', 6881)
-
+ session.add_dht_router("router.bittorrent.com", 6881)
+ session.add_dht_router("router.utorrent.com", 6881)
+
logger.info("Libtorrent session configured successfully")
return session
@@ -100,29 +102,29 @@ class TorrentDownloader:
if not LIBTORRENT_AVAILABLE or lt is None:
raise DependencyNotFoundError("libtorrent is not available")
- if torrent_source.startswith('magnet:'):
+ if torrent_source.startswith("magnet:"):
# Parse magnet link
return lt.parse_magnet_uri(torrent_source) # type: ignore
- elif torrent_source.startswith(('http://', 'https://')):
+ elif torrent_source.startswith(("http://", "https://")):
# Download torrent file
import urllib.request
- with tempfile.NamedTemporaryFile(suffix='.torrent', delete=False) as tmp_file:
+
+ with tempfile.NamedTemporaryFile(
+ suffix=".torrent", delete=False
+ ) as tmp_file:
urllib.request.urlretrieve(torrent_source, tmp_file.name)
torrent_info = lt.torrent_info(tmp_file.name) # type: ignore
Path(tmp_file.name).unlink() # Clean up temp file
- return {'ti': torrent_info}
+ return {"ti": torrent_info}
else:
# Local torrent file
torrent_path = Path(torrent_source)
if not torrent_path.exists():
raise TorrentDownloadError(f"Torrent file not found: {torrent_source}")
- return {'ti': lt.torrent_info(str(torrent_path))} # type: ignore
+ return {"ti": lt.torrent_info(str(torrent_path))} # type: ignore
def download_with_libtorrent(
- self,
- torrent_source: str,
- timeout: int = 3600,
- sequential: bool = False
+ self, torrent_source: str, timeout: int = 3600, sequential: bool = False
) -> Path:
"""
Download torrent using libtorrent.
@@ -148,48 +150,50 @@ class TorrentDownloader:
try:
self.session = self._setup_libtorrent_session()
torrent_params = self._get_torrent_info(torrent_source)
-
+
# Set save path
- torrent_params['save_path'] = str(self.download_path)
-
+ torrent_params["save_path"] = str(self.download_path)
+
if sequential and lt is not None:
- torrent_params['flags'] = lt.torrent_flags.sequential_download # type: ignore
+ torrent_params["flags"] = lt.torrent_flags.sequential_download # type: ignore
# Add torrent to session
if self.session is None:
raise TorrentDownloadError("Session is not initialized")
handle = self.session.add_torrent(torrent_params)
-
+
logger.info(f"Starting torrent download: {handle.name()}")
-
+
# Monitor download progress
start_time = time.time()
last_log_time = start_time
while not handle.is_seed():
current_time = time.time()
-
+
# Check timeout
if current_time - start_time > timeout:
- raise TorrentDownloadError(f"Download timeout after {timeout} seconds")
-
+ raise TorrentDownloadError(
+ f"Download timeout after {timeout} seconds"
+ )
+
status = handle.status()
-
+
# Prepare progress info
progress_info = {
- 'name': handle.name(),
- 'progress': status.progress * 100,
- 'download_rate': status.download_rate / 1024, # KB/s
- 'upload_rate': status.upload_rate / 1024, # KB/s
- 'num_peers': status.num_peers,
- 'total_size': status.total_wanted,
- 'downloaded': status.total_wanted_done,
- 'state': str(status.state),
+ "name": handle.name(),
+ "progress": status.progress * 100,
+ "download_rate": status.download_rate / 1024, # KB/s
+ "upload_rate": status.upload_rate / 1024, # KB/s
+ "num_peers": status.num_peers,
+ "total_size": status.total_wanted,
+ "downloaded": status.total_wanted_done,
+ "state": str(status.state),
}
-
+
# Call progress callback if provided
if self.progress_callback:
self.progress_callback(progress_info)
-
+
# Log progress periodically (every 10 seconds)
if current_time - last_log_time >= 10:
logger.info(
@@ -198,23 +202,23 @@ class TorrentDownloader:
f"- {progress_info['num_peers']} peers"
)
last_log_time = current_time
-
+
# Check for errors
if status.error:
raise TorrentDownloadError(f"Torrent error: {status.error}")
-
+
time.sleep(1)
-
+
# Download completed
download_path = self.download_path / handle.name()
logger.info(f"Torrent download completed: {download_path}")
-
+
# Remove torrent from session
if self.session is not None:
self.session.remove_torrent(handle)
-
+
return download_path
-
+
except Exception as e:
if isinstance(e, (TorrentDownloadError, DependencyNotFoundError)):
raise
@@ -243,40 +247,52 @@ class TorrentDownloader:
raise DependencyNotFoundError(
"webtorrent CLI is not available. Please install it: npm install -g webtorrent-cli"
)
-
+
try:
- cmd = [webtorrent_cli, "download", torrent_source, "--out", str(self.download_path)]
+ cmd = [
+ webtorrent_cli,
+ "download",
+ torrent_source,
+ "--out",
+ str(self.download_path),
+ ]
logger.info(f"Running webtorrent command: {' '.join(cmd)}")
-
- result = subprocess.run(cmd, check=True, capture_output=True, text=True, timeout=3600)
-
+
+ result = subprocess.run(
+ cmd, check=True, capture_output=True, text=True, timeout=3600
+ )
+
# Try to determine the download path from the output
# This is a best-effort approach since webtorrent output format may vary
- output_lines = result.stdout.split('\n')
+ output_lines = result.stdout.split("\n")
for line in output_lines:
- if 'Downloaded' in line and 'to' in line:
+ if "Downloaded" in line and "to" in line:
# Extract path from output
- parts = line.split('to')
+ parts = line.split("to")
if len(parts) > 1:
- path_str = parts[-1].strip().strip('"\'') # Remove quotes
+ path_str = parts[-1].strip().strip("\"'") # Remove quotes
download_path = Path(path_str)
if download_path.exists():
logger.info(f"Successfully downloaded to: {download_path}")
return download_path
-
+
# If we can't parse the output, scan the download directory for new files
- logger.warning("Could not parse webtorrent output, scanning download directory")
+ logger.warning(
+ "Could not parse webtorrent output, scanning download directory"
+ )
download_candidates = list(self.download_path.iterdir())
if download_candidates:
# Return the most recently modified item
newest_path = max(download_candidates, key=lambda p: p.stat().st_mtime)
logger.info(f"Found downloaded content: {newest_path}")
return newest_path
-
+
# Fallback: return the download directory
- logger.warning(f"No specific download found, returning download directory: {self.download_path}")
+ logger.warning(
+ f"No specific download found, returning download directory: {self.download_path}"
+ )
return self.download_path
-
+
except subprocess.CalledProcessError as e:
error_msg = e.stderr or e.stdout or "Unknown error"
raise TorrentDownloadError(
@@ -287,13 +303,12 @@ class TorrentDownloader:
f"webtorrent CLI timeout after {e.timeout} seconds"
) from e
except Exception as e:
- raise TorrentDownloadError(f"Failed to download with webtorrent: {str(e)}") from e
+ raise TorrentDownloadError(
+ f"Failed to download with webtorrent: {str(e)}"
+ ) from e
def download(
- self,
- torrent_source: str,
- prefer_libtorrent: bool = True,
- **kwargs
+ self, torrent_source: str, prefer_libtorrent: bool = True, **kwargs
) -> Path:
"""
Download torrent using the best available method.
@@ -310,24 +325,28 @@ class TorrentDownloader:
TorrentDownloadError: If all download methods fail
"""
methods = []
-
+
if prefer_libtorrent and LIBTORRENT_AVAILABLE:
- methods.extend([
- ('libtorrent', self.download_with_libtorrent),
- ('webtorrent-cli', self.download_with_webtorrent_cli)
- ])
+ methods.extend(
+ [
+ ("libtorrent", self.download_with_libtorrent),
+ ("webtorrent-cli", self.download_with_webtorrent_cli),
+ ]
+ )
else:
- methods.extend([
- ('webtorrent-cli', self.download_with_webtorrent_cli),
- ('libtorrent', self.download_with_libtorrent)
- ])
-
+ methods.extend(
+ [
+ ("webtorrent-cli", self.download_with_webtorrent_cli),
+ ("libtorrent", self.download_with_libtorrent),
+ ]
+ )
+
last_exception = None
-
+
for method_name, method_func in methods:
try:
logger.info(f"Attempting download with {method_name}")
- if method_name == 'libtorrent':
+ if method_name == "libtorrent":
return method_func(torrent_source, **kwargs)
else:
return method_func(torrent_source)
@@ -339,7 +358,7 @@ class TorrentDownloader:
logger.error(f"{method_name} failed: {e}")
last_exception = e
continue
-
+
# All methods failed
raise TorrentDownloadError(
f"All torrent download methods failed. Last error: {last_exception}"
diff --git a/fastanime/core/downloader/yt_dlp.py b/fastanime/core/downloader/yt_dlp.py
index 000a708..47cf140 100644
--- a/fastanime/core/downloader/yt_dlp.py
+++ b/fastanime/core/downloader/yt_dlp.py
@@ -28,7 +28,7 @@ class YtDLPDownloader(BaseDownloader):
video_path = None
sub_paths = []
merged_path = None
-
+
if TORRENT_REGEX.match(params.url):
from .torrents import download_torrent_with_webtorrent_cli
@@ -38,24 +38,26 @@ class YtDLPDownloader(BaseDownloader):
dest_dir.mkdir(parents=True, exist_ok=True)
video_path = dest_dir / episode_title
- video_path = download_torrent_with_webtorrent_cli(video_path, params.url)
+ video_path = download_torrent_with_webtorrent_cli(
+ video_path, params.url
+ )
else:
video_path = self._download_video(params)
-
+
if params.subtitles:
sub_paths = self._download_subs(params)
if params.merge:
merged_path = self._merge_subtitles(params, video_path, sub_paths)
-
+
return DownloadResult(
success=True,
video_path=video_path,
subtitle_paths=sub_paths,
merged_path=merged_path,
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
-
+
except KeyboardInterrupt:
print()
print("Aborted!")
@@ -63,7 +65,7 @@ class YtDLPDownloader(BaseDownloader):
success=False,
error_message="Download aborted by user",
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
except Exception as e:
logger.error(f"Download failed: {e}")
@@ -71,7 +73,7 @@ class YtDLPDownloader(BaseDownloader):
success=False,
error_message=str(e),
anime_title=params.anime_title,
- episode_title=params.episode_title
+ episode_title=params.episode_title,
)
def _download_video(self, params: DownloadParams) -> Path:
@@ -167,7 +169,9 @@ class YtDLPDownloader(BaseDownloader):
downloaded_subs.append(sub_path)
return downloaded_subs
- def _merge_subtitles(self, params, video_path: Path, sub_paths: list[Path]) -> Path | None:
+ def _merge_subtitles(
+ self, params, video_path: Path, sub_paths: list[Path]
+ ) -> Path | None:
"""Merge subtitles with video and return the path to the merged file."""
self.FFMPEG_EXECUTABLE = shutil.which("ffmpeg")
if not self.FFMPEG_EXECUTABLE:
@@ -243,7 +247,7 @@ class YtDLPDownloader(BaseDownloader):
f"[green bold]Subtitles merged successfully.[/] Output file: {final_output_path}"
)
return final_output_path
-
+
except Exception as e:
print(f"[red bold]An unexpected error[/] occurred: {e}")
return None
diff --git a/fastanime/core/utils/file.py b/fastanime/core/utils/file.py
index 81d5073..10ec4aa 100644
--- a/fastanime/core/utils/file.py
+++ b/fastanime/core/utils/file.py
@@ -7,9 +7,11 @@ from typing import IO, Any, Union
logger = logging.getLogger(__name__)
+
class NO_DEFAULT:
pass
+
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
"""Sanitizes a string so it could be used as part of a filename.
@param restricted Use a stricter subset of allowed characters
@@ -19,58 +21,85 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
import itertools
import unicodedata
import re
- ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
- itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
- 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
- if s == '':
- return ''
+ ACCENT_CHARS = dict(
+ zip(
+ "รรรรรร
รรรรรรรรรรรรรรรรรลรลรรรรลฐรรรร รกรขรฃรครฅรฆรงรจรฉรชรซรฌรญรฎรฏรฐรฑรฒรณรดรตรถลรธลรนรบรปรผลฑรฝรพรฟ",
+ itertools.chain(
+ "AAAAAA",
+ ["AE"],
+ "CEEEEIIIIDNOOOOOOO",
+ ["OE"],
+ "UUUUUY",
+ ["TH", "ss"],
+ "aaaaaa",
+ ["ae"],
+ "ceeeeiiiionooooooo",
+ ["oe"],
+ "uuuuuy",
+ ["th"],
+ "y",
+ ),
+ )
+ )
+
+ if s == "":
+ return ""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
- elif not restricted and char == '\n':
- return '\0 '
+ elif not restricted and char == "\n":
+ return "\0 "
elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
# Replace with their full-width unicode counterparts
- return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
- elif char == '?' or ord(char) < 32 or ord(char) == 127:
- return ''
+ return {"/": "\u29f8", "\\": "\u29f9"}.get(char, chr(ord(char) + 0xFEE0))
+ elif char == "?" or ord(char) < 32 or ord(char) == 127:
+ return ""
elif char == '"':
- return '' if restricted else '\''
- elif char == ':':
- return '\0_\0-' if restricted else '\0 \0-'
- elif char in '\\/|*<>':
- return '\0_'
- if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
- return '' if unicodedata.category(char)[0] in 'CM' else '\0_'
+ return "" if restricted else "'"
+ elif char == ":":
+ return "\0_\0-" if restricted else "\0 \0-"
+ elif char in "\\/|*<>":
+ return "\0_"
+ if restricted and (
+ char in "!&'()[]{}$;`^,#" or char.isspace() or ord(char) > 127
+ ):
+ return "" if unicodedata.category(char)[0] in "CM" else "\0_"
return char
# Replace look-alike Unicode glyphs
if restricted and (is_id is NO_DEFAULT or not is_id):
- s = unicodedata.normalize('NFKC', s)
- s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) # Handle timestamps
- result = ''.join(map(replace_insane, s))
+ s = unicodedata.normalize("NFKC", s)
+ s = re.sub(
+ r"[0-9]+(?::[0-9]+)+", lambda m: m.group(0).replace(":", "_"), s
+ ) # Handle timestamps
+ result = "".join(map(replace_insane, s))
if is_id is NO_DEFAULT:
- result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars
- STRIP_RE = r'(?:\0.|[ _-])*'
- result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result) # Remove substitute chars from start/end
- result = result.replace('\0', '') or '_'
+ result = re.sub(
+ r"(\0.)(?:(?=\1)..)+", r"\1", result
+ ) # Remove repeated substitute chars
+ STRIP_RE = r"(?:\0.|[ _-])*"
+ result = re.sub(
+ f"^\0.{STRIP_RE}|{STRIP_RE}\0.$", "", result
+ ) # Remove substitute chars from start/end
+ result = result.replace("\0", "") or "_"
if not is_id:
- while '__' in result:
- result = result.replace('__', '_')
- result = result.strip('_')
+ while "__" in result:
+ result = result.replace("__", "_")
+ result = result.strip("_")
# Common case of "Foreign band name - English song title"
- if restricted and result.startswith('-_'):
+ if restricted and result.startswith("-_"):
result = result[2:]
- if result.startswith('-'):
- result = '_' + result[len('-'):]
- result = result.lstrip('.')
+ if result.startswith("-"):
+ result = "_" + result[len("-") :]
+ result = result.lstrip(".")
if not result:
- result = '_'
+ result = "_"
return result
+
def get_file_modification_time(filepath: Path) -> float:
"""
Returns the modification time of a file as a Unix timestamp.
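
Two calls make the rewritten substitution rules concrete (this helper closely follows yt-dlp's sanitize_filename, so treat the expected outputs as indicative rather than authoritative):

sanitize_filename("Pokémon: The Movie", restricted=True)
# -> 'Pokemon_-_The_Movie'  (accents folded, ':' -> '_-', spaces -> '_')
sanitize_filename("Re:Zero / Episode 1")
# -> 'Re：Zero ⧸ Episode 1'  (forbidden chars swapped for full-width look-alikes)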
diff --git a/fastanime/core/utils/fuzzy.py b/fastanime/core/utils/fuzzy.py
index cd00781..3f3142b 100644
--- a/fastanime/core/utils/fuzzy.py
+++ b/fastanime/core/utils/fuzzy.py
@@ -6,24 +6,24 @@ otherwise falls back to a pure Python implementation with the same API.
Usage:
Basic usage with the convenience functions:
-
+
>>> from fastanime.core.utils.fuzzy import fuzz
>>> fuzz.ratio("hello world", "hello")
62
- >>> fuzz.partial_ratio("hello world", "hello")
+ >>> fuzz.partial_ratio("hello world", "hello")
100
-
+
Using the FuzzyMatcher class directly:
-
+
>>> from fastanime.core.utils.fuzzy import FuzzyMatcher
>>> matcher = FuzzyMatcher()
>>> matcher.backend
'thefuzz' # or 'pure_python' if thefuzz is not available
>>> matcher.token_sort_ratio("fuzzy wuzzy", "wuzzy fuzzy")
100
-
+
For drop-in replacement of thefuzz.fuzz:
-
+
>>> from fastanime.core.utils.fuzzy import ratio, partial_ratio
>>> ratio("test", "best")
75
@@ -37,6 +37,7 @@ logger = logging.getLogger(__name__)
# Try to import thefuzz, fall back to pure Python implementation
try:
from thefuzz import fuzz as _fuzz_impl
+
THEFUZZ_AVAILABLE = True
logger.debug("Using thefuzz for fuzzy matching")
except ImportError:
@@ -48,29 +49,29 @@ except ImportError:
class _PurePythonFuzz:
"""
Pure Python implementation of fuzzy string matching algorithms.
-
+
This provides the same API as thefuzz.fuzz but with pure Python implementations
of the core algorithms.
"""
-
+
@staticmethod
def _levenshtein_distance(s1: str, s2: str) -> int:
"""
Calculate the Levenshtein distance between two strings.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
The Levenshtein distance as an integer
"""
if len(s1) < len(s2):
return _PurePythonFuzz._levenshtein_distance(s2, s1)
-
+
if len(s2) == 0:
return len(s1)
-
+
previous_row = list(range(len(s2) + 1))
for i, c1 in enumerate(s1):
current_row = [i + 1]
@@ -81,55 +82,55 @@ class _PurePythonFuzz:
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
-
+
return previous_row[-1]
-
+
@staticmethod
def _longest_common_subsequence(s1: str, s2: str) -> int:
"""
Calculate the length of the longest common subsequence.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Length of the longest common subsequence
"""
m, n = len(s1), len(s2)
dp = [[0] * (n + 1) for _ in range(m + 1)]
-
+
for i in range(1, m + 1):
for j in range(1, n + 1):
if s1[i - 1] == s2[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
-
+
return dp[m][n]
-
+
@staticmethod
def _normalize_string(s: str) -> str:
"""
Normalize a string for comparison by converting to lowercase and stripping whitespace.
-
+
Args:
s: String to normalize
-
+
Returns:
Normalized string
"""
return s.lower().strip()
-
+
@staticmethod
def ratio(s1: str, s2: str) -> int:
"""
Calculate the similarity ratio between two strings using Levenshtein distance.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Similarity ratio as an integer from 0 to 100
"""
@@ -137,185 +138,185 @@ class _PurePythonFuzz:
return 100
if not s1 or not s2:
return 0
-
+
distance = _PurePythonFuzz._levenshtein_distance(s1, s2)
max_len = max(len(s1), len(s2))
-
+
if max_len == 0:
return 100
-
+
similarity = (max_len - distance) / max_len
return int(similarity * 100)
-
+
@staticmethod
def partial_ratio(s1: str, s2: str) -> int:
"""
Calculate the partial similarity ratio between two strings.
-
+
This finds the best matching substring and calculates the ratio for that.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial similarity ratio as an integer from 0 to 100
"""
if not s1 or not s2:
return 0
-
+
if len(s1) <= len(s2):
shorter, longer = s1, s2
else:
shorter, longer = s2, s1
-
+
best_ratio = 0
for i in range(len(longer) - len(shorter) + 1):
- substring = longer[i:i + len(shorter)]
+ substring = longer[i : i + len(shorter)]
ratio = _PurePythonFuzz.ratio(shorter, substring)
best_ratio = max(best_ratio, ratio)
-
+
return best_ratio
-
+
@staticmethod
def token_sort_ratio(s1: str, s2: str) -> int:
"""
Calculate similarity after sorting tokens in both strings.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Token sort ratio as an integer from 0 to 100
"""
if not s1 or not s2:
return 0
-
+
# Normalize and split into tokens
tokens1 = sorted(_PurePythonFuzz._normalize_string(s1).split())
tokens2 = sorted(_PurePythonFuzz._normalize_string(s2).split())
-
+
# Rejoin sorted tokens
- sorted_s1 = ' '.join(tokens1)
- sorted_s2 = ' '.join(tokens2)
-
+ sorted_s1 = " ".join(tokens1)
+ sorted_s2 = " ".join(tokens2)
+
return _PurePythonFuzz.ratio(sorted_s1, sorted_s2)
-
+
@staticmethod
def token_set_ratio(s1: str, s2: str) -> int:
"""
Calculate similarity using set operations on tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Token set ratio as an integer from 0 to 100
"""
if not s1 or not s2:
return 0
-
+
# Normalize and split into tokens
tokens1 = set(_PurePythonFuzz._normalize_string(s1).split())
tokens2 = set(_PurePythonFuzz._normalize_string(s2).split())
-
+
# Find intersection and differences
intersection = tokens1 & tokens2
diff1 = tokens1 - tokens2
diff2 = tokens2 - tokens1
-
+
# Create sorted strings for comparison
- sorted_intersection = ' '.join(sorted(intersection))
- sorted_diff1 = ' '.join(sorted(diff1))
- sorted_diff2 = ' '.join(sorted(diff2))
-
+ sorted_intersection = " ".join(sorted(intersection))
+ sorted_diff1 = " ".join(sorted(diff1))
+ sorted_diff2 = " ".join(sorted(diff2))
+
# Combine strings for comparison
combined1 = f"{sorted_intersection} {sorted_diff1}".strip()
combined2 = f"{sorted_intersection} {sorted_diff2}".strip()
-
+
if not combined1 and not combined2:
return 100
if not combined1 or not combined2:
return 0
-
+
return _PurePythonFuzz.ratio(combined1, combined2)
-
+
@staticmethod
def partial_token_sort_ratio(s1: str, s2: str) -> int:
"""
Calculate partial similarity after sorting tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial token sort ratio as an integer from 0 to 100
"""
if not s1 or not s2:
return 0
-
+
# Normalize and split into tokens
tokens1 = sorted(_PurePythonFuzz._normalize_string(s1).split())
tokens2 = sorted(_PurePythonFuzz._normalize_string(s2).split())
-
+
# Rejoin sorted tokens
- sorted_s1 = ' '.join(tokens1)
- sorted_s2 = ' '.join(tokens2)
-
+ sorted_s1 = " ".join(tokens1)
+ sorted_s2 = " ".join(tokens2)
+
return _PurePythonFuzz.partial_ratio(sorted_s1, sorted_s2)
-
+
@staticmethod
def partial_token_set_ratio(s1: str, s2: str) -> int:
"""
Calculate partial similarity using set operations on tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial token set ratio as an integer from 0 to 100
"""
if not s1 or not s2:
return 0
-
+
# Normalize and split into tokens
tokens1 = set(_PurePythonFuzz._normalize_string(s1).split())
tokens2 = set(_PurePythonFuzz._normalize_string(s2).split())
-
+
# Find intersection and differences
intersection = tokens1 & tokens2
diff1 = tokens1 - tokens2
diff2 = tokens2 - tokens1
-
+
# Create sorted strings for comparison
- sorted_intersection = ' '.join(sorted(intersection))
- sorted_diff1 = ' '.join(sorted(diff1))
- sorted_diff2 = ' '.join(sorted(diff2))
-
+ sorted_intersection = " ".join(sorted(intersection))
+ sorted_diff1 = " ".join(sorted(diff1))
+ sorted_diff2 = " ".join(sorted(diff2))
+
# Combine strings for comparison
combined1 = f"{sorted_intersection} {sorted_diff1}".strip()
combined2 = f"{sorted_intersection} {sorted_diff2}".strip()
-
+
if not combined1 and not combined2:
return 100
if not combined1 or not combined2:
return 0
-
+
return _PurePythonFuzz.partial_ratio(combined1, combined2)
class FuzzyMatcher:
"""
Fuzzy string matching class with the same API as thefuzz.fuzz.
-
+
This class automatically uses thefuzz if available, otherwise falls back
to a pure Python implementation.
"""
-
+
def __init__(self):
"""Initialize the fuzzy matcher with the appropriate backend."""
if THEFUZZ_AVAILABLE and _fuzz_impl is not None:
@@ -324,22 +325,22 @@ class FuzzyMatcher:
else:
self._impl = _PurePythonFuzz
self._backend = "pure_python"
-
+
logger.debug(f"FuzzyMatcher initialized with backend: {self._backend}")
-
+
@property
def backend(self) -> str:
"""Get the name of the backend being used."""
return self._backend
-
+
def ratio(self, s1: str, s2: str) -> int:
"""
Calculate the similarity ratio between two strings.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Similarity ratio as an integer from 0 to 100
"""
@@ -348,15 +349,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in ratio calculation: {e}")
return 0
-
+
def partial_ratio(self, s1: str, s2: str) -> int:
"""
Calculate the partial similarity ratio between two strings.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial similarity ratio as an integer from 0 to 100
"""
@@ -365,15 +366,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in partial_ratio calculation: {e}")
return 0
-
+
def token_sort_ratio(self, s1: str, s2: str) -> int:
"""
Calculate similarity after sorting tokens in both strings.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Token sort ratio as an integer from 0 to 100
"""
@@ -382,15 +383,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in token_sort_ratio calculation: {e}")
return 0
-
+
def token_set_ratio(self, s1: str, s2: str) -> int:
"""
Calculate similarity using set operations on tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Token set ratio as an integer from 0 to 100
"""
@@ -399,15 +400,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in token_set_ratio calculation: {e}")
return 0
-
+
def partial_token_sort_ratio(self, s1: str, s2: str) -> int:
"""
Calculate partial similarity after sorting tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial token sort ratio as an integer from 0 to 100
"""
@@ -416,15 +417,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in partial_token_sort_ratio calculation: {e}")
return 0
-
+
def partial_token_set_ratio(self, s1: str, s2: str) -> int:
"""
Calculate partial similarity using set operations on tokens.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Partial token set ratio as an integer from 0 to 100
"""
@@ -433,15 +434,15 @@ class FuzzyMatcher:
except Exception as e:
logger.warning(f"Error in partial_token_set_ratio calculation: {e}")
return 0
-
+
def best_ratio(self, s1: str, s2: str) -> int:
"""
Get the best ratio from all available methods.
-
+
Args:
s1: First string
s2: Second string
-
+
Returns:
Best similarity ratio as an integer from 0 to 100
"""
@@ -468,13 +469,13 @@ partial_token_sort_ratio = fuzz.partial_token_sort_ratio
partial_token_set_ratio = fuzz.partial_token_set_ratio
__all__ = [
- 'FuzzyMatcher',
- 'fuzz',
- 'ratio',
- 'partial_ratio',
- 'token_sort_ratio',
- 'token_set_ratio',
- 'partial_token_sort_ratio',
- 'partial_token_set_ratio',
- 'THEFUZZ_AVAILABLE',
+ "FuzzyMatcher",
+ "fuzz",
+ "ratio",
+ "partial_ratio",
+ "token_sort_ratio",
+ "token_set_ratio",
+ "partial_token_sort_ratio",
+ "partial_token_set_ratio",
+ "THEFUZZ_AVAILABLE",
]
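The pure-Python `ratio` above is just normalized Levenshtein distance, `(max_len - distance) / max_len` scaled to 0-100. A quick check against the values quoted in the module docstring, using the import path documented there:

    from fastanime.core.utils.fuzzy import fuzz

    # "test" vs "best": one substitution over max length 4
    # -> (4 - 1) / 4 = 0.75 -> 75
    print(fuzz.ratio("test", "best"))            # 75
    print(fuzz.token_sort_ratio("b a", "a b"))   # 100 (tokens sorted first)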
diff --git a/fastanime/core/utils/normalizer.py b/fastanime/core/utils/normalizer.py
index 04dd113..086027a 100644
--- a/fastanime/core/utils/normalizer.py
+++ b/fastanime/core/utils/normalizer.py
@@ -26,15 +26,15 @@ Example Usage:
... provider_title_to_media_api_title,
... media_api_title_to_provider_title
... )
-
+
# Convert provider title to media API title
>>> provider_title_to_media_api_title("1P", "allanime")
'one piece'
-
+
# Convert media API title to provider title
>>> media_api_title_to_provider_title("one piece", "allanime")
'1P'
-
+
# Check available providers
>>> get_available_providers()
['allanime', 'hianime', 'animepahe']
@@ -58,23 +58,23 @@ _normalizer_cache: Optional[Dict[str, Dict[str, str]]] = None
def _load_normalizer_data() -> Dict[str, Dict[str, str]]:
"""
Load the normalizer.json file and cache it.
-
+
Returns:
Dictionary containing provider mappings from normalizer.json
-
+
Raises:
FileNotFoundError: If normalizer.json is not found
json.JSONDecodeError: If normalizer.json is malformed
"""
global _normalizer_cache
-
+
if _normalizer_cache is not None:
return _normalizer_cache
-
+
normalizer_path = ASSETS_DIR / "normalizer.json"
-
+
try:
- with open(normalizer_path, 'r', encoding='utf-8') as f:
+ with open(normalizer_path, "r", encoding="utf-8") as f:
_normalizer_cache = json.load(f)
logger.debug("Loaded normalizer data from %s", normalizer_path)
# Type checker now knows _normalizer_cache is not None
@@ -88,23 +88,20 @@ def _load_normalizer_data() -> Dict[str, Dict[str, str]]:
raise
-def provider_title_to_media_api_title(
- provider_title: str,
- provider_name: str
-) -> str:
+def provider_title_to_media_api_title(provider_title: str, provider_name: str) -> str:
"""
Convert a provider title to its equivalent media API title.
-
+
This function takes a title from a specific provider (e.g., "1P" from allanime)
and converts it to the standard media API title (e.g., "one piece").
-
+
Args:
provider_title: The title as it appears on the provider
provider_name: The name of the provider (e.g., "allanime", "hianime", "animepahe")
-
+
Returns:
The normalized media API title, or the original title if no mapping exists
-
+
Example:
>>> provider_title_to_media_api_title("1P", "allanime")
"one piece"
@@ -115,48 +112,47 @@ def provider_title_to_media_api_title(
"""
try:
normalizer_data = _load_normalizer_data()
-
+
# Check if the provider exists in the normalizer data
if provider_name not in normalizer_data:
logger.debug("Provider '%s' not found in normalizer data", provider_name)
return provider_title
-
+
provider_mappings = normalizer_data[provider_name]
-
+
# Return the mapped title if it exists, otherwise return the original
normalized_title = provider_mappings.get(provider_title, provider_title)
-
+
if normalized_title != provider_title:
logger.debug(
"Normalized provider title: '%s' -> '%s' (provider: %s)",
- provider_title, normalized_title, provider_name
+ provider_title,
+ normalized_title,
+ provider_name,
)
-
+
return normalized_title
-
+
except (FileNotFoundError, json.JSONDecodeError) as e:
logger.warning("Failed to load normalizer data: %s", e)
return provider_title
-def media_api_title_to_provider_title(
- media_api_title: str,
- provider_name: str
-) -> str:
+def media_api_title_to_provider_title(media_api_title: str, provider_name: str) -> str:
"""
Convert a media API title to its equivalent provider title.
-
+
This function takes a standard media API title and converts it to the title
- used by a specific provider. This is the reverse operation of
+ used by a specific provider. This is the reverse operation of
provider_title_to_media_api_title().
-
+
Args:
media_api_title: The title as it appears in the media API (e.g., AniList)
provider_name: The name of the provider (e.g., "allanime", "hianime", "animepahe")
-
+
Returns:
The provider-specific title, or the original title if no mapping exists
-
+
Example:
>>> media_api_title_to_provider_title("one piece", "allanime")
"1P"
@@ -167,53 +163,53 @@ def media_api_title_to_provider_title(
"""
try:
normalizer_data = _load_normalizer_data()
-
+
# Check if the provider exists in the normalizer data
if provider_name not in normalizer_data:
logger.debug("Provider '%s' not found in normalizer data", provider_name)
return media_api_title
-
+
provider_mappings = normalizer_data[provider_name]
-
+
# Create a reverse mapping (media_api_title -> provider_title)
reverse_mappings = {v: k for k, v in provider_mappings.items()}
-
+
# Return the mapped title if it exists, otherwise return the original
provider_title = reverse_mappings.get(media_api_title, media_api_title)
-
+
if provider_title != media_api_title:
logger.debug(
"Converted media API title to provider title: '%s' -> '%s' (provider: %s)",
- media_api_title, provider_title, provider_name
+ media_api_title,
+ provider_title,
+ provider_name,
)
-
+
return provider_title
-
+
except (FileNotFoundError, json.JSONDecodeError) as e:
logger.warning("Failed to load normalizer data: %s", e)
return media_api_title
def normalize_title(
- title: str,
- provider_name: str,
- use_provider_mapping: bool = False
+ title: str, provider_name: str, use_provider_mapping: bool = False
) -> str:
"""
Normalize a title for search operations.
-
+
This convenience function determines the appropriate normalization direction
based on the use_provider_mapping parameter.
-
+
Args:
title: The title to normalize
provider_name: The name of the provider
use_provider_mapping: If True, convert media API title to provider title.
If False, convert provider title to media API title.
-
+
Returns:
The normalized title
-
+
Example:
>>> normalize_title("one piece", "allanime", use_provider_mapping=True)
"1P"
@@ -229,10 +225,10 @@ def normalize_title(
def get_available_providers() -> list[str]:
"""
Get a list of all available providers in the normalizer data.
-
+
Returns:
List of provider names that have mappings defined
-
+
Example:
>>> get_available_providers()
['allanime', 'hianime', 'animepahe']
@@ -248,7 +244,7 @@ def get_available_providers() -> list[str]:
def clear_cache() -> None:
"""
Clear the internal cache for normalizer data.
-
+
This is useful for testing or when the normalizer.json file has been updated
and you want to reload the data.
"""
@@ -260,13 +256,13 @@ def clear_cache() -> None:
def get_provider_mappings(provider_name: str) -> Dict[str, str]:
"""
Get all title mappings for a specific provider.
-
+
Args:
provider_name: The name of the provider
-
+
Returns:
Dictionary mapping provider titles to media API titles
-
+
Example:
>>> mappings = get_provider_mappings("allanime")
>>> print(mappings["1P"])
@@ -283,16 +279,16 @@ def get_provider_mappings(provider_name: str) -> Dict[str, str]:
def has_mapping(title: str, provider_name: str, reverse: bool = False) -> bool:
"""
Check if a mapping exists for the given title and provider.
-
+
Args:
title: The title to check
provider_name: The name of the provider
reverse: If True, check for media API -> provider mapping.
If False, check for provider -> media API mapping.
-
+
Returns:
True if a mapping exists, False otherwise
-
+
Example:
>>> has_mapping("1P", "allanime", reverse=False)
True
@@ -303,44 +299,42 @@ def has_mapping(title: str, provider_name: str, reverse: bool = False) -> bool:
"""
try:
normalizer_data = _load_normalizer_data()
-
+
if provider_name not in normalizer_data:
return False
-
+
provider_mappings = normalizer_data[provider_name]
-
+
if reverse:
# Check if title exists as a value (media API title)
return title in provider_mappings.values()
else:
# Check if title exists as a key (provider title)
return title in provider_mappings
-
+
except (FileNotFoundError, json.JSONDecodeError) as e:
logger.warning("Failed to load normalizer data: %s", e)
return False
def add_runtime_mapping(
- provider_title: str,
- media_api_title: str,
- provider_name: str
+ provider_title: str, media_api_title: str, provider_name: str
) -> None:
"""
Add a new mapping at runtime (not persisted to file).
-
+
This is useful for adding mappings discovered during runtime that
are not present in the normalizer.json file.
-
+
Args:
provider_title: The provider-specific title
media_api_title: The media API title
provider_name: The name of the provider
-
+
Note:
This mapping is only stored in memory and will be lost when
the cache is cleared or the application restarts.
-
+
Example:
>>> add_runtime_mapping("Custom Title", "Normalized Title", "allanime")
>>> provider_title_to_media_api_title("Custom Title", "allanime")
@@ -348,18 +342,20 @@ def add_runtime_mapping(
"""
try:
normalizer_data = _load_normalizer_data()
-
+
# Initialize provider if it doesn't exist
if provider_name not in normalizer_data:
normalizer_data[provider_name] = {}
-
+
# Add the mapping
normalizer_data[provider_name][provider_title] = media_api_title
-
+
logger.info(
"Added runtime mapping: '%s' -> '%s' (provider: %s)",
- provider_title, media_api_title, provider_name
+ provider_title,
+ media_api_title,
+ provider_name,
)
-
+
except (FileNotFoundError, json.JSONDecodeError) as e:
- logger.warning("Failed to add runtime mapping: %s", e)
\ No newline at end of file
+ logger.warning("Failed to add runtime mapping: %s", e)
diff --git a/fastanime/libs/media_api/api.py b/fastanime/libs/media_api/api.py
index 43fadef..8b03249 100644
--- a/fastanime/libs/media_api/api.py
+++ b/fastanime/libs/media_api/api.py
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
# Map the client name to its import path AND the config section it needs.
API_CLIENTS = {
"anilist": ("fastanime.libs.media_api.anilist.api.AniListApi", "anilist"),
- "jikan": ("fastanime.libs.media_api.jikan.api.JikanApi", "jikan"), # For the future
+ "jikan": ("fastanime.libs.media_api.jikan.api.JikanApi", "jikan"), # For the future
}
diff --git a/fastanime/libs/media_api/jikan/api.py b/fastanime/libs/media_api/jikan/api.py
index 9458413..8401ea1 100644
--- a/fastanime/libs/media_api/jikan/api.py
+++ b/fastanime/libs/media_api/jikan/api.py
@@ -116,7 +116,7 @@ class JikanApi(BaseApiClient):
raw_data = self._execute_request(endpoint)
if not raw_data or "data" not in raw_data:
return None
-
+
recommendations = []
for item in raw_data["data"]:
# Jikan recommendation structure has an 'entry' field with anime data
@@ -124,7 +124,7 @@ class JikanApi(BaseApiClient):
if entry:
media_item = mapper._to_generic_media_item(entry)
recommendations.append(media_item)
-
+
return recommendations
except Exception as e:
logger.error(f"Failed to fetch recommendations for media {params.id}: {e}")
@@ -137,7 +137,7 @@ class JikanApi(BaseApiClient):
raw_data = self._execute_request(endpoint)
if not raw_data:
return None
-
+
# Return the raw character data as Jikan provides it
return raw_data
except Exception as e:
@@ -153,7 +153,7 @@ class JikanApi(BaseApiClient):
raw_data = self._execute_request(endpoint)
if not raw_data or "data" not in raw_data:
return None
-
+
related_anime = []
for relation in raw_data["data"]:
entries = relation.get("entry", [])
@@ -164,9 +164,7 @@ class JikanApi(BaseApiClient):
id=entry["mal_id"],
id_mal=entry["mal_id"],
title=MediaTitle(
- english=entry["name"],
- romaji=entry["name"],
- native=None
+ english=entry["name"], romaji=entry["name"], native=None
),
cover_image=MediaImage(large=""),
description=None,
@@ -176,7 +174,7 @@ class JikanApi(BaseApiClient):
user_status=None,
)
related_anime.append(media_item)
-
+
return related_anime
except Exception as e:
logger.error(f"Failed to fetch related anime for media {params.id}: {e}")
@@ -186,5 +184,7 @@ class JikanApi(BaseApiClient):
self, params: MediaAiringScheduleParams
) -> Optional[Dict]:
"""Jikan doesn't provide a direct airing schedule endpoint per anime."""
- logger.warning("Jikan API does not support fetching airing schedules for individual anime.")
+ logger.warning(
+ "Jikan API does not support fetching airing schedules for individual anime."
+ )
return None
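For context on the loops above: Jikan v4 wraps each recommended or related anime in an `entry` object under a top-level `data` list. A sketch of the payload shape the recommendation loop expects (field names per the public Jikan v4 API; the mapper call itself is the project's own):

    raw_data = {
        "data": [
            {"entry": {"mal_id": 21, "title": "One Piece"}},
            {"entry": {"mal_id": 269, "title": "Bleach"}},
        ]
    }

    # Mirrors get_recommendation_for: pull each 'entry' and hand it to the mapper.
    entries = [item.get("entry") for item in raw_data["data"] if item.get("entry")]
    print([e["mal_id"] for e in entries])  # [21, 269]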
diff --git a/fastanime/libs/provider/scraping/html_parser.py b/fastanime/libs/provider/scraping/html_parser.py
index ed66f2a..5970b21 100644
--- a/fastanime/libs/provider/scraping/html_parser.py
+++ b/fastanime/libs/provider/scraping/html_parser.py
@@ -4,6 +4,7 @@ HTML parsing utilities with optional lxml support.
This module provides comprehensive HTML parsing capabilities using either
Python's built-in html.parser or lxml for better performance when available.
"""
+
# TODO: Review and optimize the HTML parsing logic for better performance and flexibility.
# Consider adding more utility functions for common HTML manipulation tasks.
import logging
@@ -20,6 +21,7 @@ logger = logging.getLogger(__name__)
HAS_LXML = False
try:
from lxml import etree, html as lxml_html
+
HAS_LXML = True
logger.debug("lxml is available and will be used for HTML parsing")
except ImportError:
@@ -28,11 +30,11 @@ except ImportError:
class HTMLParserConfig:
"""Configuration for HTML parser selection."""
-
+
def __init__(self, use_lxml: Optional[bool] = None):
"""
Initialize parser configuration.
-
+
Args:
use_lxml: Force use of lxml (True), html.parser (False), or auto-detect (None)
"""
@@ -40,30 +42,32 @@ class HTMLParserConfig:
self.use_lxml = HAS_LXML
else:
self.use_lxml = use_lxml and HAS_LXML
-
+
if use_lxml and not HAS_LXML:
- logger.warning("lxml requested but not available, falling back to html.parser")
+ logger.warning(
+ "lxml requested but not available, falling back to html.parser"
+ )
class HTMLParser:
"""
Comprehensive HTML parser with optional lxml support.
-
+
Provides a unified interface for HTML parsing operations regardless
of the underlying parser implementation.
"""
-
+
def __init__(self, config: Optional[HTMLParserConfig] = None):
"""Initialize the HTML parser with configuration."""
self.config = config or HTMLParserConfig()
-
- def parse(self, html_content: str) -> Union[Any, 'ParsedHTML']:
+
+ def parse(self, html_content: str) -> Union[Any, "ParsedHTML"]:
"""
Parse HTML content and return a parsed tree.
-
+
Args:
html_content: Raw HTML string to parse
-
+
Returns:
Parsed HTML tree (lxml Element or custom ParsedHTML object)
"""
@@ -71,7 +75,7 @@ class HTMLParser:
return self._parse_with_lxml(html_content)
else:
return self._parse_with_builtin(html_content)
-
+
def _parse_with_lxml(self, html_content: str) -> Any:
"""Parse HTML using lxml."""
try:
@@ -80,8 +84,8 @@ class HTMLParser:
except Exception as e:
logger.warning(f"lxml parsing failed: {e}, falling back to html.parser")
return self._parse_with_builtin(html_content)
-
- def _parse_with_builtin(self, html_content: str) -> 'ParsedHTML':
+
+ def _parse_with_builtin(self, html_content: str) -> "ParsedHTML":
"""Parse HTML using Python's built-in parser."""
parser = BuiltinHTMLParser()
parser.feed(html_content)
@@ -90,89 +94,89 @@ class HTMLParser:
class BuiltinHTMLParser(BaseHTMLParser):
"""Enhanced HTML parser using Python's built-in capabilities."""
-
+
def __init__(self):
super().__init__()
self.elements = []
self.current_element = None
self.element_stack = []
-
+
def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]):
"""Handle opening tags."""
element = {
- 'tag': tag,
- 'attrs': dict(attrs),
- 'text': '',
- 'children': [],
- 'start_pos': self.getpos(),
+ "tag": tag,
+ "attrs": dict(attrs),
+ "text": "",
+ "children": [],
+ "start_pos": self.getpos(),
}
-
+
if self.element_stack:
- self.element_stack[-1]['children'].append(element)
+ self.element_stack[-1]["children"].append(element)
else:
self.elements.append(element)
-
+
self.element_stack.append(element)
-
+
def handle_endtag(self, tag: str):
"""Handle closing tags."""
- if self.element_stack and self.element_stack[-1]['tag'] == tag:
+ if self.element_stack and self.element_stack[-1]["tag"] == tag:
element = self.element_stack.pop()
- element['end_pos'] = self.getpos()
-
+ element["end_pos"] = self.getpos()
+
def handle_data(self, data: str):
"""Handle text content."""
if self.element_stack:
- self.element_stack[-1]['text'] += data
+ self.element_stack[-1]["text"] += data
class ParsedHTML:
"""Wrapper for parsed HTML using built-in parser."""
-
+
def __init__(self, elements: List[Dict], raw_html: str):
self.elements = elements
self.raw_html = raw_html
-
+
def find_by_id(self, element_id: str) -> Optional[Dict]:
"""Find element by ID."""
- return self._find_recursive(self.elements, lambda el: el['attrs'].get('id') == element_id)
-
+ return self._find_recursive(
+ self.elements, lambda el: el["attrs"].get("id") == element_id
+ )
+
def find_by_class(self, class_name: str) -> List[Dict]:
"""Find elements by class name."""
results = []
self._find_all_recursive(
- self.elements,
- lambda el: class_name in el['attrs'].get('class', '').split(),
- results
+ self.elements,
+ lambda el: class_name in el["attrs"].get("class", "").split(),
+ results,
)
return results
-
+
def find_by_tag(self, tag_name: str) -> List[Dict]:
"""Find elements by tag name."""
results = []
self._find_all_recursive(
- self.elements,
- lambda el: el['tag'].lower() == tag_name.lower(),
- results
+ self.elements, lambda el: el["tag"].lower() == tag_name.lower(), results
)
return results
-
+
def _find_recursive(self, elements: List[Dict], condition) -> Optional[Dict]:
"""Recursively find first element matching condition."""
for element in elements:
if condition(element):
return element
- result = self._find_recursive(element['children'], condition)
+ result = self._find_recursive(element["children"], condition)
if result:
return result
return None
-
+
def _find_all_recursive(self, elements: List[Dict], condition, results: List[Dict]):
"""Recursively find all elements matching condition."""
for element in elements:
if condition(element):
results.append(element)
- self._find_all_recursive(element['children'], condition, results)
+ self._find_all_recursive(element["children"], condition, results)
# Global parser instance
@@ -182,62 +186,62 @@ _default_parser = HTMLParser()
def extract_attributes(html_element: str) -> Dict[str, str]:
"""
Extract attributes from an HTML element string.
-
+
Args:
html_element: HTML element as string (e.g., '<div class="test" id="main">')
-
+
Returns:
Dictionary of attribute name-value pairs
-
+
Examples:
>>> extract_attributes('<div class="test" id="main">')
{'class': 'test', 'id': 'main'}
"""
if not html_element:
return {}
-
+
# Use regex to extract attributes from HTML string
attr_pattern = r'(\w+)=(["\'])([^"\']*?)\2'
matches = re.findall(attr_pattern, html_element)
-
+
attributes = {}
for match in matches:
attr_name, _, attr_value = match
attributes[attr_name] = attr_value
-
+
# Handle attributes without quotes
- unquoted_pattern = r'(\w+)=([^\s>]+)'
+ unquoted_pattern = r"(\w+)=([^\s>]+)"
unquoted_matches = re.findall(unquoted_pattern, html_element)
for attr_name, attr_value in unquoted_matches:
if attr_name not in attributes:
attributes[attr_name] = attr_value
-
+
return attributes
def get_element_by_id(element_id: str, html_content: str) -> Optional[str]:
"""
Get HTML element by ID.
-
+
Args:
element_id: The ID attribute value to search for
html_content: HTML content to search in
-
+
Returns:
HTML string of the element or None if not found
-
+
Examples:
>>> html = '<div id="test">Content</div>'
>>> get_element_by_id("test", html)
'<div id="test">Content</div>'
"""
parsed = _default_parser.parse(html_content)
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
element = parsed.xpath(f'//*[@id="{element_id}"]')
if element:
- return etree.tostring(element[0], encoding='unicode', method='html')
+ return etree.tostring(element[0], encoding="unicode", method="html")
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
return None
@@ -245,28 +249,28 @@ def get_element_by_id(element_id: str, html_content: str) -> Optional[str]:
element = parsed.find_by_id(element_id)
if element:
return _element_to_html(element, html_content)
-
+
return None
def get_element_by_tag(tag_name: str, html_content: str) -> Optional[str]:
"""
Get first HTML element by tag name.
-
+
Args:
tag_name: The tag name to search for
html_content: HTML content to search in
-
+
Returns:
HTML string of the element or None if not found
"""
parsed = _default_parser.parse(html_content)
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
- elements = parsed.xpath(f'//{tag_name}')
+ elements = parsed.xpath(f"//{tag_name}")
if elements:
- return etree.tostring(elements[0], encoding='unicode', method='html')
+ return etree.tostring(elements[0], encoding="unicode", method="html")
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
return None
@@ -274,28 +278,28 @@ def get_element_by_tag(tag_name: str, html_content: str) -> Optional[str]:
elements = parsed.find_by_tag(tag_name)
if elements:
return _element_to_html(elements[0], html_content)
-
+
return None
def get_element_by_class(class_name: str, html_content: str) -> Optional[str]:
"""
Get first HTML element by class name.
-
+
Args:
class_name: The class name to search for
html_content: HTML content to search in
-
+
Returns:
HTML string of the element or None if not found
"""
parsed = _default_parser.parse(html_content)
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
elements = parsed.xpath(f'//*[contains(@class, "{class_name}")]')
if elements:
- return etree.tostring(elements[0], encoding='unicode', method='html')
+ return etree.tostring(elements[0], encoding="unicode", method="html")
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
return None
@@ -303,109 +307,119 @@ def get_element_by_class(class_name: str, html_content: str) -> Optional[str]:
elements = parsed.find_by_class(class_name)
if elements:
return _element_to_html(elements[0], html_content)
-
+
return None
def get_elements_by_tag(tag_name: str, html_content: str) -> List[str]:
"""
Get all HTML elements by tag name.
-
+
Args:
tag_name: The tag name to search for
html_content: HTML content to search in
-
+
Returns:
List of HTML strings for matching elements
"""
parsed = _default_parser.parse(html_content)
results = []
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
- elements = parsed.xpath(f'//{tag_name}')
+ elements = parsed.xpath(f"//{tag_name}")
for element in elements:
- results.append(etree.tostring(element, encoding='unicode', method='html'))
+ results.append(
+ etree.tostring(element, encoding="unicode", method="html")
+ )
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
else:
elements = parsed.find_by_tag(tag_name)
for element in elements:
results.append(_element_to_html(element, html_content))
-
+
return results
def get_elements_by_class(class_name: str, html_content: str) -> List[str]:
"""
Get all HTML elements by class name.
-
+
Args:
class_name: The class name to search for
html_content: HTML content to search in
-
+
Returns:
List of HTML strings for matching elements
"""
parsed = _default_parser.parse(html_content)
results = []
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
elements = parsed.xpath(f'//*[contains(@class, "{class_name}")]')
for element in elements:
- results.append(etree.tostring(element, encoding='unicode', method='html'))
+ results.append(
+ etree.tostring(element, encoding="unicode", method="html")
+ )
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
else:
elements = parsed.find_by_class(class_name)
for element in elements:
results.append(_element_to_html(element, html_content))
-
+
return results
def get_elements_html_by_class(class_name: str, html_content: str) -> List[str]:
"""
Get HTML strings of elements by class name.
-
+
This is an alias for get_elements_by_class for yt-dlp compatibility.
-
+
Args:
class_name: The class name to search for
html_content: HTML content to search in
-
+
Returns:
List of HTML strings for matching elements
"""
return get_elements_by_class(class_name, html_content)
-def get_element_text_and_html_by_tag(tag_name: str, html_content: str) -> Tuple[Optional[str], Optional[str]]:
+def get_element_text_and_html_by_tag(
+ tag_name: str, html_content: str
+) -> Tuple[Optional[str], Optional[str]]:
"""
Get both text content and HTML of first element by tag name.
-
+
Args:
tag_name: The tag name to search for
html_content: HTML content to search in
-
+
Returns:
Tuple of (text_content, html_string) or (None, None) if not found
-
+
Examples:
>>> html = '<script>alert("test");</script>'
>>> get_element_text_and_html_by_tag("script", html)
('alert("test");', '<script>alert("test");</script>')
"""
parsed = _default_parser.parse(html_content)
-
+
if _default_parser.config.use_lxml and HAS_LXML:
try:
- elements = parsed.xpath(f'//{tag_name}')
+ elements = parsed.xpath(f"//{tag_name}")
if elements:
element = elements[0]
- text = element.text_content() if hasattr(element, 'text_content') else (element.text or '')
- html_str = etree.tostring(element, encoding='unicode', method='html')
+ text = (
+ element.text_content()
+ if hasattr(element, "text_content")
+ else (element.text or "")
+ )
+ html_str = etree.tostring(element, encoding="unicode", method="html")
return text, html_str
except Exception as e:
logger.warning(f"lxml XPath search failed: {e}")
@@ -417,61 +431,63 @@ def get_element_text_and_html_by_tag(tag_name: str, html_content: str) -> Tuple[
text = _extract_text_content(element)
html_str = _element_to_html(element, html_content)
return text, html_str
-
+
return None, None
def _element_to_html(element: Dict, original_html: str) -> str:
"""
Convert parsed element back to HTML string.
-
+
This is a simplified implementation that reconstructs HTML from parsed data.
For production use, consider using lxml for better accuracy.
"""
if not element:
return ""
-
+
# Build opening tag
- tag = element['tag']
- attrs = element.get('attrs', {})
- attr_str = ' '.join(f'{k}="{v}"' for k, v in attrs.items() if v is not None)
-
+ tag = element["tag"]
+ attrs = element.get("attrs", {})
+ attr_str = " ".join(f'{k}="{v}"' for k, v in attrs.items() if v is not None)
+
if attr_str:
opening_tag = f"<{tag} {attr_str}>"
else:
opening_tag = f"<{tag}>"
-
+
# Add text content
- text = element.get('text', '')
-
+ text = element.get("text", "")
+
# Add children
children_html = ""
- for child in element.get('children', []):
+ for child in element.get("children", []):
children_html += _element_to_html(child, original_html)
-
+
# Build closing tag
closing_tag = f"{tag}>"
-
+
return f"{opening_tag}{text}{children_html}{closing_tag}"
def _extract_text_content(element: Dict) -> str:
"""Extract all text content from element and its children."""
- text = element.get('text', '')
-
- for child in element.get('children', []):
+ text = element.get("text", "")
+
+ for child in element.get("children", []):
text += _extract_text_content(child)
-
+
return text
def configure_parser(use_lxml: Optional[bool] = None) -> None:
"""
Configure the global HTML parser.
-
+
Args:
use_lxml: Force use of lxml (True), html.parser (False), or auto-detect (None)
"""
global _default_parser
_default_parser = HTMLParser(HTMLParserConfig(use_lxml))
- logger.info(f"HTML parser configured: {'lxml' if _default_parser.config.use_lxml else 'html.parser'}")
+ logger.info(
+ f"HTML parser configured: {'lxml' if _default_parser.config.use_lxml else 'html.parser'}"
+ )
diff --git a/fastanime/libs/provider/scraping/user_agents.py b/fastanime/libs/provider/scraping/user_agents.py
index c02592c..4637aae 100644
--- a/fastanime/libs/provider/scraping/user_agents.py
+++ b/fastanime/libs/provider/scraping/user_agents.py
@@ -12,150 +12,147 @@ from typing import List, Optional
class UserAgentGenerator:
"""
Generator for realistic user agent strings.
-
+
Provides a variety of common user agents from different browsers
and operating systems to help avoid detection.
"""
-
+
# Common user agents for different browsers and OS combinations
USER_AGENTS = [
# Chrome on Windows
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-
# Chrome on macOS
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
-
# Chrome on Linux
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
-
# Firefox on Windows
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:123.0) Gecko/20100101 Firefox/123.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
-
# Firefox on macOS
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:123.0) Gecko/20100101 Firefox/123.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:122.0) Gecko/20100101 Firefox/122.0",
-
# Firefox on Linux
"Mozilla/5.0 (X11; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
-
# Safari on macOS
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.3 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15",
-
# Edge on Windows
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0",
-
# Mobile Chrome (Android)
"Mozilla/5.0 (Linux; Android 14; SM-G998B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 13; Pixel 7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Mobile Safari/537.36",
-
# Mobile Safari (iOS)
"Mozilla/5.0 (iPhone; CPU iPhone OS 17_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.3 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPad; CPU OS 17_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.3 Mobile/15E148 Safari/604.1",
]
-
+
# Browser-specific user agents for when you need a specific browser
- CHROME_USER_AGENTS = [ua for ua in USER_AGENTS if "Chrome" in ua and "Edg" not in ua]
+ CHROME_USER_AGENTS = [
+ ua for ua in USER_AGENTS if "Chrome" in ua and "Edg" not in ua
+ ]
FIREFOX_USER_AGENTS = [ua for ua in USER_AGENTS if "Firefox" in ua]
- SAFARI_USER_AGENTS = [ua for ua in USER_AGENTS if "Safari" in ua and "Chrome" not in ua]
+ SAFARI_USER_AGENTS = [
+ ua for ua in USER_AGENTS if "Safari" in ua and "Chrome" not in ua
+ ]
EDGE_USER_AGENTS = [ua for ua in USER_AGENTS if "Edg" in ua]
-
+
# Platform-specific user agents
WINDOWS_USER_AGENTS = [ua for ua in USER_AGENTS if "Windows NT" in ua]
MACOS_USER_AGENTS = [ua for ua in USER_AGENTS if "Macintosh" in ua]
- LINUX_USER_AGENTS = [ua for ua in USER_AGENTS if "Linux" in ua and "Android" not in ua]
+ LINUX_USER_AGENTS = [
+ ua for ua in USER_AGENTS if "Linux" in ua and "Android" not in ua
+ ]
MOBILE_USER_AGENTS = [ua for ua in USER_AGENTS if "Mobile" in ua or "Android" in ua]
-
+
def __init__(self, seed: Optional[int] = None):
"""
Initialize the user agent generator.
-
+
Args:
seed: Random seed for reproducible results (optional)
"""
if seed is not None:
random.seed(seed)
-
+
def random(self) -> str:
"""
Get a random user agent string.
-
+
Returns:
Random user agent string
"""
return random.choice(self.USER_AGENTS)
-
+
def random_browser(self, browser: str) -> str:
"""
Get a random user agent for a specific browser.
-
+
Args:
browser: Browser name ('chrome', 'firefox', 'safari', 'edge')
-
+
Returns:
Random user agent string for the specified browser
-
+
Raises:
ValueError: If browser is not supported
"""
browser = browser.lower()
- if browser == 'chrome':
+ if browser == "chrome":
return random.choice(self.CHROME_USER_AGENTS)
- elif browser == 'firefox':
+ elif browser == "firefox":
return random.choice(self.FIREFOX_USER_AGENTS)
- elif browser == 'safari':
+ elif browser == "safari":
return random.choice(self.SAFARI_USER_AGENTS)
- elif browser == 'edge':
+ elif browser == "edge":
return random.choice(self.EDGE_USER_AGENTS)
else:
raise ValueError(f"Unsupported browser: {browser}")
-
+
def random_platform(self, platform: str) -> str:
"""
Get a random user agent for a specific platform.
-
+
Args:
platform: Platform name ('windows', 'macos', 'linux', 'mobile')
-
+
Returns:
Random user agent string for the specified platform
-
+
Raises:
ValueError: If platform is not supported
"""
platform = platform.lower()
- if platform == 'windows':
+ if platform == "windows":
return random.choice(self.WINDOWS_USER_AGENTS)
- elif platform in ('macos', 'mac'):
+ elif platform in ("macos", "mac"):
return random.choice(self.MACOS_USER_AGENTS)
- elif platform == 'linux':
+ elif platform == "linux":
return random.choice(self.LINUX_USER_AGENTS)
- elif platform == 'mobile':
+ elif platform == "mobile":
return random.choice(self.MOBILE_USER_AGENTS)
else:
raise ValueError(f"Unsupported platform: {platform}")
-
+
def add_user_agent(self, user_agent: str) -> None:
"""
Add a custom user agent to the list.
-
+
Args:
user_agent: Custom user agent string to add
"""
if user_agent not in self.USER_AGENTS:
self.USER_AGENTS.append(user_agent)
-
+
def get_all(self) -> List[str]:
"""
Get all available user agent strings.
-
+
Returns:
List of all user agent strings
"""
@@ -169,10 +166,10 @@ _default_generator = UserAgentGenerator()
def random_user_agent() -> str:
"""
Get a random user agent string using the default generator.
-
+
Returns:
Random user agent string
-
+
Examples:
>>> ua = random_user_agent()
>>> "Mozilla" in ua
@@ -184,10 +181,10 @@ def random_user_agent() -> str:
def random_user_agent_browser(browser: str) -> str:
"""
Get a random user agent for a specific browser.
-
+
Args:
browser: Browser name ('chrome', 'firefox', 'safari', 'edge')
-
+
Returns:
Random user agent string for the specified browser
"""
@@ -197,10 +194,10 @@ def random_user_agent_browser(browser: str) -> str:
def random_user_agent_platform(platform: str) -> str:
"""
Get a random user agent for a specific platform.
-
+
Args:
platform: Platform name ('windows', 'macos', 'linux', 'mobile')
-
+
Returns:
Random user agent string for the specified platform
"""
@@ -210,7 +207,7 @@ def random_user_agent_platform(platform: str) -> str:
def set_user_agent_seed(seed: int) -> None:
"""
Set the random seed for user agent generation.
-
+
Args:
seed: Random seed value
"""
@@ -221,7 +218,7 @@ def set_user_agent_seed(seed: int) -> None:
def add_custom_user_agent(user_agent: str) -> None:
"""
Add a custom user agent to the default generator.
-
+
Args:
user_agent: Custom user agent string to add
"""
@@ -231,7 +228,7 @@ def add_custom_user_agent(user_agent: str) -> None:
def get_all_user_agents() -> List[str]:
"""
Get all available user agent strings from the default generator.
-
+
Returns:
List of all user agent strings
"""
diff --git a/fastanime/libs/provider/scraping/utils.py b/fastanime/libs/provider/scraping/utils.py
index 4976e9a..9b3cce9 100644
--- a/fastanime/libs/provider/scraping/utils.py
+++ b/fastanime/libs/provider/scraping/utils.py
@@ -6,21 +6,21 @@ that was previously sourced from yt-dlp.
"""
import string
-from typing import Union,Optional
+from typing import Union, Optional
def encode_base_n(num: int, n: int, table: Optional[str] = None) -> str:
"""
Encode a number in base-n representation.
-
+
Args:
num: The number to encode
n: The base to use for encoding
table: Custom character table (optional)
-
+
Returns:
String representation of the number in base-n
-
+
Examples:
>>> encode_base_n(255, 16)
'ff'
@@ -30,39 +30,39 @@ def encode_base_n(num: int, n: int, table: Optional[str] = None) -> str:
if table is None:
# Default table: 0-9, a-z
table = string.digits + string.ascii_lowercase
-
+
if not 2 <= n <= len(table):
raise ValueError(f"Base must be between 2 and {len(table)}")
-
+
if num == 0:
return table[0]
-
+
result = []
is_negative = num < 0
num = abs(num)
-
+
while num > 0:
result.append(table[num % n])
num //= n
-
+
if is_negative:
- result.append('-')
-
- return ''.join(reversed(result))
+ result.append("-")
+
+ return "".join(reversed(result))
def decode_base_n(encoded: str, n: int, table: Optional[str] = None) -> int:
"""
Decode a base-n encoded string back to an integer.
-
+
Args:
encoded: The base-n encoded string
n: The base used for encoding
table: Custom character table (optional)
-
+
Returns:
The decoded integer
-
+
Examples:
>>> decode_base_n('ff', 16)
255
@@ -71,129 +71,135 @@ def decode_base_n(encoded: str, n: int, table: Optional[str] = None) -> int:
"""
if table is None:
table = string.digits + string.ascii_lowercase
-
+
if not 2 <= n <= len(table):
raise ValueError(f"Base must be between 2 and {len(table)}")
-
+
if not encoded:
return 0
-
- is_negative = encoded.startswith('-')
+
+ is_negative = encoded.startswith("-")
if is_negative:
encoded = encoded[1:]
-
+
result = 0
for i, char in enumerate(reversed(encoded.lower())):
if char not in table:
raise ValueError(f"Invalid character '{char}' for base {n}")
-
+
digit_value = table.index(char)
if digit_value >= n:
raise ValueError(f"Invalid digit '{char}' for base {n}")
-
- result += digit_value * (n ** i)
-
+
+ result += digit_value * (n**i)
+
return -result if is_negative else result
-def url_encode(text: str, safe: str = '') -> str:
+def url_encode(text: str, safe: str = "") -> str:
"""
URL encode a string.
-
+
Args:
text: Text to encode
safe: Characters that should not be encoded
-
+
Returns:
URL encoded string
"""
import urllib.parse
+
return urllib.parse.quote(text, safe=safe)
def url_decode(text: str) -> str:
"""
URL decode a string.
-
+
Args:
text: URL encoded text to decode
-
+
Returns:
Decoded string
"""
import urllib.parse
+
return urllib.parse.unquote(text)
def html_unescape(text: str) -> str:
"""
Unescape HTML entities in text.
-
+
Args:
text: Text containing HTML entities
-
+
Returns:
Text with HTML entities unescaped
-
+
Examples:
>>> html_unescape('&quot;Hello&quot; &amp; &lt;World&gt;')
'"Hello" & <World>'
"""
import html
+
return html.unescape(text)
def strip_tags(html_content: str) -> str:
"""
Remove all HTML tags from content, leaving only text.
-
+
Args:
html_content: HTML content with tags
-
+
Returns:
Plain text with tags removed
-
+
Examples:
>>> strip_tags('<p>Hello world!</p>')
'Hello world!'
"""
import re
- return re.sub(r'<[^>]+>', '', html_content)
+
+ return re.sub(r"<[^>]+>", "", html_content)
def normalize_whitespace(text: str) -> str:
"""
Normalize whitespace in text by collapsing multiple spaces and removing leading/trailing whitespace.
-
+
Args:
text: Text to normalize
-
+
Returns:
Text with normalized whitespace
-
+
Examples:
>>> normalize_whitespace(' Hello world \\n\\t ')
'Hello world'
"""
import re
- return re.sub(r'\s+', ' ', text.strip())
+
+ return re.sub(r"\s+", " ", text.strip())
def extract_domain(url: str) -> str:
"""
Extract domain from a URL.
-
+
Args:
url: Full URL
-
+
Returns:
Domain portion of the URL
-
+
Examples:
>>> extract_domain('https://example.com/path?query=1')
'example.com'
"""
import urllib.parse
+
parsed = urllib.parse.urlparse(url)
return parsed.netloc
@@ -201,38 +207,40 @@ def extract_domain(url: str) -> str:
def join_url(base: str, path: str) -> str:
"""
Join a base URL with a path.
-
+
Args:
base: Base URL
path: Path to join
-
+
Returns:
Combined URL
-
+
Examples:
>>> join_url('https://example.com', '/api/data')
'https://example.com/api/data'
"""
import urllib.parse
+
return urllib.parse.urljoin(base, path)
def parse_query_string(query: str) -> dict:
"""
Parse a query string into a dictionary.
-
+
Args:
query: Query string (with or without leading '?')
-
+
Returns:
Dictionary of query parameters
-
+
Examples:
>>> parse_query_string('?name=John&age=30')
{'name': ['John'], 'age': ['30']}
"""
import urllib.parse
- if query.startswith('?'):
+
+ if query.startswith("?"):
query = query[1:]
return urllib.parse.parse_qs(query)
@@ -240,19 +248,19 @@ def parse_query_string(query: str) -> dict:
def build_query_string(params: dict) -> str:
"""
Build a query string from a dictionary of parameters.
-
+
Args:
params: Dictionary of parameters
-
+
Returns:
URL-encoded query string
-
+
Examples:
>>> build_query_string({'name': 'John', 'age': 30})
'name=John&age=30'
"""
import urllib.parse
-
+
# Handle both single values and lists
normalized_params = {}
for key, value in params.items():
@@ -260,5 +268,5 @@ def build_query_string(params: dict) -> str:
normalized_params[key] = value
else:
normalized_params[key] = [str(value)]
-
+
return urllib.parse.urlencode(normalized_params, doseq=True)
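The base-n pair above round-trips over the default 0-9a-z table; a quick sanity check built from the documented examples:

    from fastanime.libs.provider.scraping.utils import decode_base_n, encode_base_n

    assert encode_base_n(255, 16) == "ff"
    assert decode_base_n("ff", 16) == 255
    # Arbitrary-base round-trip (base 36 uses the full default table).
    assert decode_base_n(encode_base_n(12345, 36), 36) == 12345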
diff --git a/fastanime/libs/selectors/base.py b/fastanime/libs/selectors/base.py
index bc4b099..f0ea128 100644
--- a/fastanime/libs/selectors/base.py
+++ b/fastanime/libs/selectors/base.py
@@ -55,7 +55,7 @@ class BaseSelector(ABC):
# Default implementation: single selection in a loop
selected = []
remaining_choices = choices.copy()
-
+
while remaining_choices:
choice = self.choose(
f"{prompt} (Select multiple, empty to finish)",
@@ -63,16 +63,18 @@ class BaseSelector(ABC):
preview=preview,
header=header,
)
-
+
if not choice or choice == "[DONE] Finish selection":
break
-
+
selected.append(choice)
remaining_choices.remove(choice)
-
- if not self.confirm(f"Selected: {', '.join(selected)}. Continue selecting?", default=True):
+
+ if not self.confirm(
+ f"Selected: {', '.join(selected)}. Continue selecting?", default=True
+ ):
break
-
+
return selected
@abstractmethod
diff --git a/fastanime/libs/selectors/fzf/selector.py b/fastanime/libs/selectors/fzf/selector.py
index 35555ff..576696f 100644
--- a/fastanime/libs/selectors/fzf/selector.py
+++ b/fastanime/libs/selectors/fzf/selector.py
@@ -77,9 +77,11 @@ class FzfSelector(BaseSelector):
)
if result.returncode != 0:
return []
-
+
# Split the output by newlines and filter out empty strings
- selections = [line.strip() for line in result.stdout.strip().split('\n') if line.strip()]
+ selections = [
+ line.strip() for line in result.stdout.strip().split("\n") if line.strip()
+ ]
return selections
def confirm(self, prompt, *, default=False):
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 348236f..714c0fc 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -7,109 +7,109 @@ from fastanime.cli.utils.parser import parse_episode_range
class TestParseEpisodeRange:
"""Test cases for the parse_episode_range function."""
-
+
@pytest.fixture
def episodes(self):
"""Sample episode list for testing."""
return ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
-
+
def test_no_range_returns_all_episodes(self, episodes):
"""Test that None or empty range returns all episodes."""
result = list(parse_episode_range(None, episodes))
assert result == episodes
-
+
def test_colon_only_returns_all_episodes(self, episodes):
"""Test that ':' returns all episodes."""
result = list(parse_episode_range(":", episodes))
assert result == episodes
-
+
def test_start_end_range(self, episodes):
"""Test start:end range format."""
result = list(parse_episode_range("2:5", episodes))
assert result == ["3", "4", "5"]
-
+
def test_start_only_range(self, episodes):
"""Test start: range format."""
result = list(parse_episode_range("5:", episodes))
assert result == ["6", "7", "8", "9", "10"]
-
+
def test_end_only_range(self, episodes):
"""Test :end range format."""
result = list(parse_episode_range(":3", episodes))
assert result == ["1", "2", "3"]
-
+
def test_start_end_step_range(self, episodes):
"""Test start:end:step range format."""
result = list(parse_episode_range("2:8:2", episodes))
assert result == ["3", "5", "7"]
-
+
def test_single_number_range(self, episodes):
"""Test single number format (start from index)."""
result = list(parse_episode_range("5", episodes))
assert result == ["6", "7", "8", "9", "10"]
-
+
def test_empty_start_end_in_three_part_range_raises_error(self, episodes):
"""Test that empty parts in start:end:step format raise error."""
with pytest.raises(ValueError, match="When using 3 parts"):
list(parse_episode_range(":5:2", episodes))
-
+
with pytest.raises(ValueError, match="When using 3 parts"):
list(parse_episode_range("2::2", episodes))
-
+
with pytest.raises(ValueError, match="When using 3 parts"):
list(parse_episode_range("2:5:", episodes))
-
+
def test_invalid_integer_raises_error(self, episodes):
"""Test that invalid integers raise ValueError."""
with pytest.raises(ValueError, match="Must be a valid integer"):
list(parse_episode_range("abc", episodes))
-
+
with pytest.raises(ValueError, match="Start and end must be valid integers"):
list(parse_episode_range("2:abc", episodes))
-
+
with pytest.raises(ValueError, match="All parts must be valid integers"):
list(parse_episode_range("2:5:abc", episodes))
-
+
def test_zero_step_raises_error(self, episodes):
"""Test that zero step raises ValueError."""
with pytest.raises(ValueError, match="Step value must be positive"):
list(parse_episode_range("2:5:0", episodes))
-
+
def test_negative_step_raises_error(self, episodes):
"""Test that negative step raises ValueError."""
with pytest.raises(ValueError, match="Step value must be positive"):
list(parse_episode_range("2:5:-1", episodes))
-
+
def test_too_many_colons_raises_error(self, episodes):
"""Test that too many colons raise ValueError."""
with pytest.raises(ValueError, match="Too many colon separators"):
list(parse_episode_range("2:5:7:9", episodes))
-
+
def test_edge_case_empty_list(self):
"""Test behavior with empty episode list."""
result = list(parse_episode_range(":", []))
assert result == []
-
+
def test_edge_case_single_episode(self):
"""Test behavior with single episode."""
episodes = ["1"]
result = list(parse_episode_range(":", episodes))
assert result == ["1"]
-
+
result = list(parse_episode_range("0:1", episodes))
assert result == ["1"]
-
+
def test_numerical_sorting(self):
"""Test that episodes are sorted numerically, not lexicographically."""
episodes = ["10", "2", "1", "11", "3"]
result = list(parse_episode_range(":", episodes))
assert result == ["1", "2", "3", "10", "11"]
-
+
def test_index_out_of_bounds_behavior(self, episodes):
"""Test behavior when indices exceed available episodes."""
# Python slicing handles out-of-bounds gracefully
result = list(parse_episode_range("15:", episodes))
assert result == [] # No episodes beyond index 15
-
+
result = list(parse_episode_range(":20", episodes))
assert result == episodes # All episodes (slice stops at end)
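The assertions above fully pin down the range semantics, so they can be read back as code. A minimal sketch of parse_episode_range consistent with these tests, assuming it is a generator and taking the error messages from the pytest.raises matchers (the actual implementation in fastanime may differ):

def parse_episode_range(episode_range, episodes):
    # Sort numerically, not lexicographically, so "10" follows "9".
    episodes = sorted(episodes, key=float)
    if not episode_range:
        yield from episodes
        return
    parts = episode_range.split(":")
    if len(parts) == 1:
        try:
            start = int(parts[0])
        except ValueError:
            raise ValueError("Must be a valid integer")
        yield from episodes[start:]
    elif len(parts) == 2:
        try:
            start = int(parts[0]) if parts[0] else None
            end = int(parts[1]) if parts[1] else None
        except ValueError:
            raise ValueError("Start and end must be valid integers")
        yield from episodes[start:end]
    elif len(parts) == 3:
        if not all(parts):
            raise ValueError("When using 3 parts, all of start, end and step are required")
        try:
            start, end, step = (int(p) for p in parts)
        except ValueError:
            raise ValueError("All parts must be valid integers")
        if step <= 0:
            raise ValueError("Step value must be positive")
        yield from episodes[start:end:step]
    else:
        raise ValueError("Too many colon separators")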
diff --git a/tests/test_torrent_downloader.py b/tests/test_torrent_downloader.py
index 9da8fac..0f0d8de 100644
--- a/tests/test_torrent_downloader.py
+++ b/tests/test_torrent_downloader.py
@@ -12,7 +12,7 @@ import pytest
from fastanime.core.downloader.torrents import (
TorrentDownloader,
TorrentDownloadError,
- LIBTORRENT_AVAILABLE
+ LIBTORRENT_AVAILABLE,
)
from fastanime.core.exceptions import DependencyNotFoundError
@@ -27,12 +27,13 @@ class TestTorrentDownloader(unittest.TestCase):
download_path=self.temp_dir,
max_upload_rate=100,
max_download_rate=200,
- max_connections=50
+ max_connections=50,
)
def tearDown(self):
"""Clean up test fixtures."""
import shutil
+
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_init(self):
@@ -47,48 +48,50 @@ class TestTorrentDownloader(unittest.TestCase):
"""Test that download directory is created if it doesn't exist."""
non_existent_dir = self.temp_dir / "new_dir"
self.assertFalse(non_existent_dir.exists())
-
+
downloader = TorrentDownloader(download_path=non_existent_dir)
self.assertTrue(non_existent_dir.exists())
- @patch('fastanime.core.downloader.torrents.shutil.which')
+ @patch("fastanime.core.downloader.torrents.shutil.which")
def test_download_with_webtorrent_cli_not_available(self, mock_which):
"""Test webtorrent CLI fallback when not available."""
mock_which.return_value = None
-
+
with self.assertRaises(DependencyNotFoundError) as context:
self.downloader.download_with_webtorrent_cli("magnet:test")
-
+
self.assertIn("webtorrent CLI is not available", str(context.exception))
- @patch('fastanime.core.downloader.torrents.subprocess.run')
- @patch('fastanime.core.downloader.torrents.shutil.which')
+ @patch("fastanime.core.downloader.torrents.subprocess.run")
+ @patch("fastanime.core.downloader.torrents.shutil.which")
def test_download_with_webtorrent_cli_success(self, mock_which, mock_run):
"""Test successful webtorrent CLI download."""
mock_which.return_value = "/usr/bin/webtorrent"
mock_result = Mock()
mock_result.stdout = f"Downloaded test-file to {self.temp_dir}/test-file"
mock_run.return_value = mock_result
-
+
# Create a dummy file to simulate download
test_file = self.temp_dir / "test-file"
test_file.touch()
-
+
result = self.downloader.download_with_webtorrent_cli("magnet:test")
-
+
mock_run.assert_called_once()
self.assertEqual(result, test_file)
- @patch('fastanime.core.downloader.torrents.subprocess.run')
- @patch('fastanime.core.downloader.torrents.shutil.which')
+ @patch("fastanime.core.downloader.torrents.subprocess.run")
+ @patch("fastanime.core.downloader.torrents.shutil.which")
def test_download_with_webtorrent_cli_failure(self, mock_which, mock_run):
"""Test webtorrent CLI download failure."""
mock_which.return_value = "/usr/bin/webtorrent"
- mock_run.side_effect = subprocess.CalledProcessError(1, "webtorrent", stderr="Error")
-
+ mock_run.side_effect = subprocess.CalledProcessError(
+ 1, "webtorrent", stderr="Error"
+ )
+
with self.assertRaises(TorrentDownloadError) as context:
self.downloader.download_with_webtorrent_cli("magnet:test")
-
+
self.assertIn("webtorrent CLI failed", str(context.exception))
@unittest.skipUnless(LIBTORRENT_AVAILABLE, "libtorrent not available")
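Read together, the three webtorrent CLI tests above imply the shape of download_with_webtorrent_cli. A hedged sketch follows; the exact CLI arguments and how the final path is recovered are not visible in this diff, so those parts are assumptions (shutil, subprocess, and Path are assumed to be imported at module top):

def download_with_webtorrent_cli(self, magnet_link):
    if shutil.which("webtorrent") is None:
        raise DependencyNotFoundError("webtorrent CLI is not available")
    try:
        result = subprocess.run(
            # The CLI arguments are an assumption; only the binary name is pinned by the tests.
            ["webtorrent", "download", magnet_link, "--out", str(self.download_path)],
            capture_output=True,
            text=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        raise TorrentDownloadError(f"webtorrent CLI failed: {e.stderr}") from e
    # The success test expects the downloaded file's path back; recovering it
    # from stdout (rather than by scanning download_path) is assumed here.
    return Path(result.stdout.rsplit(" to ", 1)[-1].strip())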
@@ -103,61 +106,60 @@ class TestTorrentDownloader(unittest.TestCase):
with self.assertRaises(DependencyNotFoundError):
self.downloader._setup_libtorrent_session()
- @patch('fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE', False)
+ @patch("fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE", False)
def test_download_with_libtorrent_not_available(self):
"""Test libtorrent download when not available."""
with self.assertRaises(DependencyNotFoundError) as context:
self.downloader.download_with_libtorrent("magnet:test")
-
+
self.assertIn("libtorrent is not available", str(context.exception))
def test_progress_callback(self):
"""Test progress callback functionality."""
callback_mock = Mock()
downloader = TorrentDownloader(
- download_path=self.temp_dir,
- progress_callback=callback_mock
+ download_path=self.temp_dir, progress_callback=callback_mock
)
-
+
# The callback should be stored
self.assertEqual(downloader.progress_callback, callback_mock)
- @patch.object(TorrentDownloader, 'download_with_webtorrent_cli')
- @patch.object(TorrentDownloader, 'download_with_libtorrent')
+ @patch.object(TorrentDownloader, "download_with_webtorrent_cli")
+ @patch.object(TorrentDownloader, "download_with_libtorrent")
def test_download_prefers_libtorrent(self, mock_libtorrent, mock_webtorrent):
"""Test that download method prefers libtorrent by default."""
mock_libtorrent.return_value = self.temp_dir / "test"
-
- with patch('fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE', True):
+
+ with patch("fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE", True):
result = self.downloader.download("magnet:test", prefer_libtorrent=True)
-
+
mock_libtorrent.assert_called_once()
mock_webtorrent.assert_not_called()
- @patch.object(TorrentDownloader, 'download_with_webtorrent_cli')
- @patch.object(TorrentDownloader, 'download_with_libtorrent')
+ @patch.object(TorrentDownloader, "download_with_webtorrent_cli")
+ @patch.object(TorrentDownloader, "download_with_libtorrent")
def test_download_fallback_to_webtorrent(self, mock_libtorrent, mock_webtorrent):
"""Test fallback to webtorrent when libtorrent fails."""
mock_libtorrent.side_effect = DependencyNotFoundError("libtorrent not found")
mock_webtorrent.return_value = self.temp_dir / "test"
-
- with patch('fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE', True):
+
+ with patch("fastanime.core.downloader.torrents.LIBTORRENT_AVAILABLE", True):
result = self.downloader.download("magnet:test")
-
+
mock_libtorrent.assert_called_once()
mock_webtorrent.assert_called_once()
self.assertEqual(result, self.temp_dir / "test")
- @patch.object(TorrentDownloader, 'download_with_webtorrent_cli')
- @patch.object(TorrentDownloader, 'download_with_libtorrent')
+ @patch.object(TorrentDownloader, "download_with_webtorrent_cli")
+ @patch.object(TorrentDownloader, "download_with_libtorrent")
def test_download_all_methods_fail(self, mock_libtorrent, mock_webtorrent):
"""Test when all download methods fail."""
mock_libtorrent.side_effect = DependencyNotFoundError("libtorrent not found")
mock_webtorrent.side_effect = DependencyNotFoundError("webtorrent not found")
-
+
with self.assertRaises(TorrentDownloadError) as context:
self.downloader.download("magnet:test")
-
+
self.assertIn("All torrent download methods failed", str(context.exception))
def test_magnet_link_detection(self):
@@ -165,12 +167,12 @@ class TestTorrentDownloader(unittest.TestCase):
magnet_link = "magnet:?xt=urn:btih:test"
http_link = "http://example.com/test.torrent"
file_path = "/path/to/test.torrent"
-
+
# Full source dispatch is exercised in integration tests with real libtorrent;
# here we only sanity-check the prefix conventions used to classify sources
- self.assertTrue(magnet_link.startswith('magnet:'))
- self.assertTrue(http_link.startswith(('http://', 'https://')))
- self.assertFalse(file_path.startswith(('magnet:', 'http://', 'https://')))
+ self.assertTrue(magnet_link.startswith("magnet:"))
+ self.assertTrue(http_link.startswith(("http://", "https://")))
+ self.assertFalse(file_path.startswith(("magnet:", "http://", "https://")))
class TestLegacyFunction(unittest.TestCase):
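As an aside, the prefix checks in test_magnet_link_detection suggest how a source string could be classified before dispatch. A hypothetical helper, invented purely for illustration:

def _classify_source(source: str) -> str:
    # Hypothetical helper; only the prefix rules come from the test above.
    if source.startswith("magnet:"):
        return "magnet"
    if source.startswith(("http://", "https://")):
        return "url"
    return "file"  # anything else is treated as a local .torrent path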
@@ -183,18 +185,21 @@ class TestLegacyFunction(unittest.TestCase):
def tearDown(self):
"""Clean up test fixtures."""
import shutil
+
shutil.rmtree(self.temp_dir, ignore_errors=True)
- @patch.object(TorrentDownloader, 'download_with_webtorrent_cli')
+ @patch.object(TorrentDownloader, "download_with_webtorrent_cli")
def test_legacy_function(self, mock_download):
"""Test the legacy download_torrent_with_webtorrent_cli function."""
- from fastanime.core.downloader.torrents import download_torrent_with_webtorrent_cli
-
+ from fastanime.core.downloader.torrents import (
+ download_torrent_with_webtorrent_cli,
+ )
+
test_path = self.temp_dir / "test.mkv"
mock_download.return_value = test_path
-
+
result = download_torrent_with_webtorrent_cli(test_path, "magnet:test")
-
+
mock_download.assert_called_once_with("magnet:test")
self.assertEqual(result, test_path)
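This test only pins the delegation and the returned value; a sketch of a compatible wrapper, with the constructor call assumed:

def download_torrent_with_webtorrent_cli(path, url):
    # Only the delegation and the return value are asserted by the test;
    # how the downloader is constructed here is assumed.
    downloader = TorrentDownloader(download_path=Path(path).parent)
    return downloader.download_with_webtorrent_cli(url)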
@@ -202,4 +207,5 @@ class TestLegacyFunction(unittest.TestCase):
if __name__ == "__main__":
# Add subprocess import for the test
import subprocess
+
unittest.main()
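Taken together, the preference and fallback tests fix the dispatch order of TorrentDownloader.download. A minimal sketch consistent with them; the error aggregation details are assumptions:

def download(self, source, prefer_libtorrent=True):
    errors = []
    if prefer_libtorrent and LIBTORRENT_AVAILABLE:
        try:
            return self.download_with_libtorrent(source)
        except (DependencyNotFoundError, TorrentDownloadError) as e:
            errors.append(e)  # fall through to the webtorrent CLI
    try:
        return self.download_with_webtorrent_cli(source)
    except (DependencyNotFoundError, TorrentDownloadError) as e:
        errors.append(e)
    raise TorrentDownloadError(f"All torrent download methods failed: {errors}")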