Compare commits

...

50 Commits

Author SHA1 Message Date
Benexl
75b1b8fab4 chore: bump version 2025-12-27 19:39:18 +03:00
Benexl
6f4155dd65 feat: add logger param 2025-12-27 19:39:10 +03:00
benexl
20ce2f6ca3 fix(cli): update command name based on availability of viu-media 2025-12-16 17:57:18 +03:00
benexl
dbbfe0331b chore: bump version to 3.3.3 2025-12-16 17:54:48 +03:00
benexl
04ae196d5f fix(cli): remove stdout and stderr reconfiguration for UTF-8 encoding on Windows 2025-12-16 17:50:04 +03:00
benexl
fe92ff8716 fix(preview): update cache directory paths and improve script execution formatting 2025-12-16 17:24:49 +03:00
benexl
c047377289 fix(preview): pass posix paths 2025-12-16 16:47:44 +03:00
benexl
fcbaa7fb0d fix(cli): ensure UTF-8 encoding on Windows platforms 2025-12-16 16:17:14 +03:00
benexl
87c87ebca7 fix(preview): update path handling for cache directories in preview scripts to pass as posix paths 2025-12-16 16:16:05 +03:00
Benedict Xavier
e1272ddf35 Merge pull request #171 from axtrat/provider/animeunity 2025-12-14 09:26:07 +03:00
axtrat
5fe59e1ddb fix: fixed pyright error 2025-12-13 21:25:12 +01:00
axtrat
83ad67a4a8 refactor(animeunity): reorganize extraction logic and update mapper 2025-12-11 13:19:39 +01:00
axtrat
94866b68f3 fix(animeunity): patch missing video info due to VixCloud changes
VixCloud's window.video object no longer provides 'quality' and 'filename' fields, causing a KeyError.
This fix updates the extraction logic.
2025-12-11 13:02:40 +01:00
Benedict Xavier
5f7e10a510 Update README with Termux installation instructions
Added installation instructions for Termux and clarified Python installation requirements.
2025-12-03 11:41:23 +03:00
Benexl
95586eb36f chore: bump version 2025-12-03 10:04:07 +03:00
Benexl
c01c08c03b feat: show welcome screen once a month 2025-12-03 10:03:52 +03:00
Benexl
14e1f44696 chore: bump version 2025-12-02 19:04:14 +03:00
Benexl
36b71c0751 feat: update welcome message 2025-12-02 18:58:15 +03:00
Benexl
6a5d7a0116 chore: bump version and update deps 2025-12-02 18:31:43 +03:00
Benedict Xavier
91efee9065 Merge pull request #169 from viu-media/feat/welcomescreen 2025-12-02 18:03:25 +03:00
Benedict Xavier
69d3d2e032 Update viu_media/cli/cli.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-02 18:02:11 +03:00
Benedict Xavier
29ba77f795 Update viu_media/cli/cli.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-02 18:01:56 +03:00
Benexl
a4950efa02 feat: wait for feedback 2025-12-02 18:00:44 +03:00
Benedict Xavier
bbd7931790 Merge branch 'master' into feat/welcomescreen 2025-12-02 17:53:17 +03:00
Benedict Xavier
c3ae5f9053 Merge pull request #168 from viu-media/feature/preview-scripts-rewrite-to-python 2025-12-02 17:52:21 +03:00
Benedict Xavier
bf06d7ee2c Update viu_media/assets/scripts/fzf/media_info.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-02 17:46:58 +03:00
Benexl
41aaf92bae style: remove unused import 2025-12-02 17:44:46 +03:00
Benexl
d38dc3194f feat: export ansi utils to preview root dir when doing dynamic previews 2025-12-02 17:43:18 +03:00
Benexl
54233aca79 feat: remove redundancy and stick to ansi_utils 2025-12-02 17:42:53 +03:00
Benexl
6b8dfba57e fix: remove double quotes 2025-12-02 17:30:31 +03:00
Benexl
3b008696d5 style: remove unused imports 2025-12-02 17:27:30 +03:00
Benedict Xavier
ece1f77e99 Merge branch 'master' into feature/preview-scripts-rewrite-to-python 2025-12-02 17:16:50 +03:00
Benexl
7b9de8620b chore: cleanup old preview scripts 2025-12-02 17:15:18 +03:00
Benexl
725754ea1a feat: improve text display for dynamic search 2025-12-02 17:12:27 +03:00
Benexl
80771f65ea feat: dynamic search rewrite in python 2025-12-02 14:36:03 +03:00
Benexl
c8c4e1b2c0 feat: refactor terminal width handling in FZF scripts for improved consistency 2025-12-02 13:30:03 +03:00
Benexl
f4958cc0cc fix: clean up whitespace in display_width and print_table_row functions 2025-12-02 13:14:19 +03:00
Benexl
1f72e0a579 feat: enhance display width calculation for better text alignment in print_table_row 2025-12-02 13:07:55 +03:00
Benexl
803c8316a7 fix: improve value alignment in print_table_row for better formatting 2025-12-02 13:04:25 +03:00
Benexl
8e803e8ecb feat(cli): search provider with title in lowercase 2025-11-20 22:14:17 +03:00
Benexl
61fcd39188 feat(dev): use PWD when specifying the viu venv bin path 2025-11-20 22:13:36 +03:00
Benexl
313f8369d7 feat: show release notes after upgrade 2025-11-18 16:32:30 +03:00
Benexl
bee73b3f9a feat(config): add show release option 2025-11-18 16:03:22 +03:00
Benexl
f647b7419a feat: add welcome screen message 2025-11-18 15:56:28 +03:00
Benexl
901c4422b5 feat: add welcome screen config option 2025-11-18 15:01:07 +03:00
Benedict Xavier
71b668894b Revise disclaimer and core features in README
Updated disclaimer section for clarity and removed redundancy.
2025-11-13 17:13:37 +03:00
Benedict Xavier
8b3a57ed07 Merge pull request #163 from Oreo-Kuuki/patch-1 2025-11-03 23:41:19 +03:00
Oreo-kuuki
b2f9c8349a Fix formatting of 'Hanka x Hanka' entry in normalizer.json
So like this, right?
2025-11-03 15:37:24 -05:00
Oreo-kuuki
25fe1e5e01 Fix formatting in normalizer.json entries
Added comma, hanka x hanka without the unicode
2025-11-03 15:14:08 -05:00
Oreo-kuuki
45ff463f7a Add mapping for 'Hanka×Hanka (2011)' to 'Hunter x Hunter (2011)' 2025-11-03 15:00:41 -05:00
42 changed files with 2205 additions and 2346 deletions
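For context on commit 94866b68f3 above: the failure it describes is a KeyError from indexing fields that VixCloud's window.video object no longer provides. The actual extractor change is not shown in this compare view; the sketch below only illustrates the defensive-lookup pattern such a fix typically uses, with made-up field values.

```python
# Illustrative only: a scraped window.video payload that may now lack 'quality' and 'filename'.
video_info: dict = {"id": 12345}

# Old-style direct indexing raises KeyError once a field disappears:
#   quality = video_info["quality"]

# Defensive lookups fall back to sensible defaults instead of crashing:
quality = video_info.get("quality", "unknown")
filename = video_info.get("filename") or f"video-{video_info.get('id', 'unknown')}.mp4"

print(quality, filename)  # -> unknown video-12345.mp4
```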

.envrc

@@ -1,6 +1,6 @@
VIU_APP_NAME="viu-dev"
PATH="./.venv/bin/:$PATH"
PATH="$PWD/.venv/bin:$PATH"
export PATH VIU_APP_NAME
if command -v nix >/dev/null;then
use flake
if command -v nix >/dev/null; then
use flake
fi


@@ -32,6 +32,11 @@
</details>
> [!IMPORTANT]
> This project scrapes public-facing websites for its streaming and downloading capabilities and primarily acts as a TUI client for AniList, Jikan, and other media APIs. The developer(s) of this application have no affiliation with these content providers. This application hosts zero content and is intended for educational and personal use only. Use at your own risk.
>
> [**Read the Full Disclaimer**](DISCLAIMER.md)
## Core Features
* 📺 **Interactive TUI:** Browse, search, and manage your AniList library in a rich terminal interface powered by `fzf`, `rofi`, or a built-in selector.
@@ -44,7 +49,7 @@
## Installation
Viu runs on any platform with Python 3.10+, including Windows, macOS, Linux, and Android (via Termux).
Viu runs on any platform with Python 3.10+, including Windows, macOS, Linux, and Android (via Termux, see other installation methods).
### Prerequisites
@@ -107,6 +112,40 @@ uv tool install "viu-media[notifications]" # For desktop notifications
# Git version (latest commit)
yay -S viu-media-git
```
#### Termux
You may need to have Rust installed; see this issue: https://github.com/pydantic/pydantic-core/issues/1012#issuecomment-2511269688.
```bash
pkg install python # uv will probably install Python for you, but it doesn't hurt to have it
pkg install rust # may be required because of pydantic
# Recommended: pip (more control)
pip install viu-media
# You may need to install pydantic manually if the above fails:
python -m pip install pydantic --extra-index-url https://termux-user-repository.github.io/pypi/
# Add yt-dlp with:
pip install yt-dlp[default,curl-cffi]
# Prefer skipping the standard extra and manually installing only what you need (lxml, yt-dlp, ...); otherwise:
pip install viu-media[standard]
# You may need to install lxml and plyer manually if the above fails, e.g.:
python -m pip install lxml --extra-index-url https://termux-user-repository.github.io/pypi/
# Alternative: uv may work, no promises
pkg install uv
uv tool install viu-media
# To add only yt-dlp:
uv tool install viu-media --with yt-dlp[default,curl-cffi]
# The standard extra may fail because of lxml and plyer; in that case install them manually:
uv tool install viu-media[standard]
```
#### Using pipx (for isolated environments)
```bash
@@ -327,10 +366,3 @@ You can run the background worker as a systemd service for persistence.
## Contributing
Contributions are welcome! Whether it's reporting a bug, proposing a feature, or writing code, your help is appreciated. Please read our [**Contributing Guidelines**](CONTRIBUTIONS.md) to get started.
## Disclaimer
> [!IMPORTANT]
> This project scrapes public-facing websites. The developer(s) of this application have no affiliation with these content providers. This application hosts zero content and is intended for educational and personal use only. Use at your own risk.
>
> [**Read the Full Disclaimer**](DISCLAIMER.md)


@@ -1,6 +1,6 @@
[project]
name = "viu-media"
version = "3.2.8"
version = "3.3.4"
description = "A browser anime site experience from the terminal"
license = "UNLICENSE"
readme = "README.md"

uv.lock (generated): diff suppressed because it is too large.

@@ -4,7 +4,8 @@
"Magia Record: Mahou Shoujo Madoka☆Magica Gaiden (TV)": "Mahou Shoujo Madoka☆Magica",
"Dungeon ni Deai o Motomeru no wa Machigatte Iru Darouka": "Dungeon ni Deai wo Motomeru no wa Machigatteiru Darou ka",
"Hazurewaku no \"Joutai Ijou Skill\" de Saikyou ni Natta Ore ga Subete wo Juurin suru made": "Hazure Waku no [Joutai Ijou Skill] de Saikyou ni Natta Ore ga Subete wo Juurin Suru made",
"Re:Zero kara Hajimeru Isekai Seikatsu Season 3": "Re:Zero kara Hajimeru Isekai Seikatsu 3rd Season"
"Re:Zero kara Hajimeru Isekai Seikatsu Season 3": "Re:Zero kara Hajimeru Isekai Seikatsu 3rd Season",
"Hanka×Hanka (2011)": "Hunter × Hunter (2011)"
},
"hianime": {
"My Star": "Oshi no Ko"

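The normalizer entries above appear to map provider or alternate titles to the titles the rest of the app expects. The lookup code is not part of this diff; a minimal sketch, assuming the mapping is applied as a per-provider dict lookup with the original title as the fallback:

```python
# Hypothetical usage of a normalizer mapping like the JSON above; only "hianime" -> "My Star"
# is taken from the diff, the lookup logic itself is an assumption.
normalizer = {"hianime": {"My Star": "Oshi no Ko"}}

def normalize_title(provider: str, title: str) -> str:
    # Fall back to the original title when no mapping exists.
    return normalizer.get(provider, {}).get(title, title)

print(normalize_title("hianime", "My Star"))  # -> Oshi no Ko
print(normalize_title("hianime", "Frieren"))  # -> Frieren (unchanged)
```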

@@ -5,9 +5,44 @@ Lightweight stdlib-only utilities to replace Rich dependency in preview scripts.
Provides RGB color formatting, table rendering, and markdown stripping.
"""
import os
import re
import shutil
import textwrap
import unicodedata
def get_terminal_width() -> int:
"""
Get terminal width, prioritizing FZF preview environment variables.
Returns:
Terminal width in columns
"""
fzf_cols = os.environ.get("FZF_PREVIEW_COLUMNS")
if fzf_cols:
return int(fzf_cols)
return shutil.get_terminal_size((80, 24)).columns
def display_width(text: str) -> int:
"""
Calculate the actual display width of text, accounting for wide characters.
Args:
text: Text to measure
Returns:
Display width in terminal columns
"""
width = 0
for char in text:
# East Asian Width property: 'F' (Fullwidth) and 'W' (Wide) take 2 columns
if unicodedata.east_asian_width(char) in ("F", "W"):
width += 2
else:
width += 1
return width
def rgb_color(r: int, g: int, b: int, text: str, bold: bool = False) -> str:
@@ -51,7 +86,7 @@ def print_rule(sep_color: str) -> None:
Args:
sep_color: Color as 'R,G,B' string
"""
width = shutil.get_terminal_size((80, 24)).columns
width = get_terminal_width()
r, g, b = parse_color(sep_color)
print(rgb_color(r, g, b, "" * width))
@@ -72,22 +107,35 @@ def print_table_row(
r, g, b = parse_color(header_color)
key_styled = rgb_color(r, g, b, key, bold=True)
# Ensure minimum width to avoid textwrap errors
safe_value_width = max(20, value_width)
# Get actual terminal width
term_width = get_terminal_width()
# Wrap value if it's too long
value_lines = textwrap.wrap(str(value), width=safe_value_width) if value else [""]
# Calculate display widths accounting for wide characters
key_display_width = display_width(key)
# Calculate actual value width based on terminal and key display width
actual_value_width = max(20, term_width - key_display_width - 2)
# Wrap value if it's too long (use character count, not display width for wrapping)
value_lines = textwrap.wrap(str(value), width=actual_value_width) if value else [""]
if not value_lines:
value_lines = [""]
# Print first line with right-aligned value
# Print first line with properly aligned value
first_line = value_lines[0]
print(f"{key_styled:<{key_width + 20}} {first_line:>{safe_value_width}}")
first_line_display_width = display_width(first_line)
# Use manual spacing to right-align based on display width
spacing = term_width - key_display_width - first_line_display_width - 2
if spacing > 0:
print(f"{key_styled} {' ' * spacing}{first_line}")
else:
print(f"{key_styled} {first_line}")
# Print remaining wrapped lines (left-aligned, indented)
for line in value_lines[1:]:
print(f"{' ' * (key_width + 2)}{line}")
print(f"{' ' * (key_display_width + 2)}{line}")
def strip_markdown(text: str) -> str:
@@ -149,6 +197,6 @@ def wrap_text(text: str, width: int | None = None) -> str:
Wrapped text
"""
if width is None:
width = shutil.get_terminal_size((80, 24)).columns
width = get_terminal_width()
return textwrap.fill(text, width=width)

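A minimal usage sketch of the width helpers added above, assuming they are importable as `_ansi_utils` (the preview scripts below import them that way); it shows why alignment has to use `display_width` rather than `len` when titles contain fullwidth characters.

```python
import os

from _ansi_utils import display_width, get_terminal_width  # assumes _ansi_utils.py sits alongside this script

os.environ["FZF_PREVIEW_COLUMNS"] = "60"   # simulate fzf's preview pane width

width = get_terminal_width()               # 60, read from FZF_PREVIEW_COLUMNS
title = "進撃の巨人"                        # five fullwidth characters
print(len(title), display_width(title))    # 5 vs 10: padding by len() would misalign the row
padding = width - display_width(title)     # columns actually left on the line
print(" " * (padding // 2) + title)        # center based on display width, not character count
```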

@@ -1,12 +1,17 @@
import sys
import shutil
from _ansi_utils import print_rule, print_table_row, strip_markdown, wrap_text
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = shutil.get_terminal_size((80, 24)).columns
term_width = get_terminal_width()
# Print title centered
print("{ANIME_TITLE}".center(term_width))


@@ -1,12 +1,17 @@
import sys
import shutil
from _ansi_utils import print_rule, print_table_row, strip_markdown, wrap_text
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = shutil.get_terminal_size((80, 24)).columns
term_width = get_terminal_width()
# Print title centered
print("{CHARACTER_NAME}".center(term_width))


@@ -0,0 +1,434 @@
#!/usr/bin/env python3
#
# FZF Dynamic Preview Script for Search Results
#
# This script handles previews for dynamic search by reading from the cached
# search results JSON and generating preview content on-the-fly.
# Template variables are injected by Python using .replace()
import json
import os
import shutil
import subprocess
import sys
from hashlib import sha256
from pathlib import Path
# Import the utility functions
from _ansi_utils import (
get_terminal_width,
print_rule,
print_table_row,
strip_markdown,
wrap_text,
)
# --- Template Variables (Injected by Python) ---
SEARCH_RESULTS_FILE = Path("{SEARCH_RESULTS_FILE}")
IMAGE_CACHE_DIR = Path("{IMAGE_CACHE_DIR}")
PREVIEW_MODE = "{PREVIEW_MODE}"
IMAGE_RENDERER = "{IMAGE_RENDERER}"
HEADER_COLOR = "{HEADER_COLOR}"
SEPARATOR_COLOR = "{SEPARATOR_COLOR}"
SCALE_UP = "{SCALE_UP}" == "True"
# --- Arguments ---
# sys.argv[1] is the selected anime title from fzf
SELECTED_TITLE = sys.argv[1] if len(sys.argv) > 1 else ""
def format_number(num):
"""Format number with thousand separators."""
if num is None:
return "N/A"
return f"{num:,}"
def format_date(date_obj):
"""Format date object to string."""
if not date_obj or date_obj == "null":
return "N/A"
year = date_obj.get("year")
month = date_obj.get("month")
day = date_obj.get("day")
if not year:
return "N/A"
if month and day:
return f"{day}/{month}/{year}"
if month:
return f"{month}/{year}"
return str(year)
def get_media_from_results(title):
"""Find media item in search results by title."""
if not SEARCH_RESULTS_FILE.exists():
return None
try:
with open(SEARCH_RESULTS_FILE, "r", encoding="utf-8") as f:
data = json.load(f)
media_list = data.get("data", {}).get("Page", {}).get("media", [])
for media in media_list:
title_obj = media.get("title", {})
eng = title_obj.get("english")
rom = title_obj.get("romaji")
nat = title_obj.get("native")
if title in (eng, rom, nat):
return media
return None
except Exception as e:
print(f"Error reading search results: {e}", file=sys.stderr)
return None
def download_image(url: str, output_path: Path) -> bool:
"""Download image from URL and save to file."""
try:
# Try using urllib (stdlib)
from urllib import request
req = request.Request(url, headers={"User-Agent": "viu/1.0"})
with request.urlopen(req, timeout=5) as response:
data = response.read()
output_path.write_bytes(data)
return True
except Exception:
# Silently fail - preview will just not show image
return False
def which(cmd):
"""Check if command exists."""
return shutil.which(cmd)
def get_terminal_dimensions():
"""Get terminal dimensions from FZF environment."""
fzf_cols = os.environ.get("FZF_PREVIEW_COLUMNS")
fzf_lines = os.environ.get("FZF_PREVIEW_LINES")
if fzf_cols and fzf_lines:
return int(fzf_cols), int(fzf_lines)
try:
rows, cols = (
subprocess.check_output(
["stty", "size"], text=True, stderr=subprocess.DEVNULL
)
.strip()
.split()
)
return int(cols), int(rows)
except Exception:
return 80, 24
def render_kitty(file_path, width, height, scale_up):
"""Render using the Kitty Graphics Protocol (kitten/icat)."""
cmd = []
if which("kitten"):
cmd = ["kitten", "icat"]
elif which("icat"):
cmd = ["icat"]
elif which("kitty"):
cmd = ["kitty", "+kitten", "icat"]
if not cmd:
return False
args = [
"--clear",
"--transfer-mode=memory",
"--unicode-placeholder",
"--stdin=no",
f"--place={width}x{height}@0x0",
]
if scale_up:
args.append("--scale-up")
args.append(file_path)
subprocess.run(cmd + args, stdout=sys.stdout, stderr=sys.stderr)
return True
def render_sixel(file_path, width, height):
"""Render using Sixel."""
if which("chafa"):
subprocess.run(
["chafa", "-f", "sixel", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
if which("img2sixel"):
pixel_width = width * 10
pixel_height = height * 20
subprocess.run(
[
"img2sixel",
f"--width={pixel_width}",
f"--height={pixel_height}",
file_path,
],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_iterm(file_path, width, height):
"""Render using iTerm2 Inline Image Protocol."""
if which("imgcat"):
subprocess.run(
["imgcat", "-W", str(width), "-H", str(height), file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
if which("chafa"):
subprocess.run(
["chafa", "-f", "iterm", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_timg(file_path, width, height):
"""Render using timg."""
if which("timg"):
subprocess.run(
["timg", f"-g{width}x{height}", "--upscale", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_chafa_auto(file_path, width, height):
"""Render using Chafa in auto mode."""
if which("chafa"):
subprocess.run(
["chafa", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def fzf_image_preview(file_path: str):
"""Main dispatch function to choose the best renderer."""
cols, lines = get_terminal_dimensions()
width = cols
height = lines
# Check explicit configuration
if IMAGE_RENDERER == "icat" or IMAGE_RENDERER == "system-kitty":
if render_kitty(file_path, width, height, SCALE_UP):
return
elif IMAGE_RENDERER == "sixel" or IMAGE_RENDERER == "system-sixels":
if render_sixel(file_path, width, height):
return
elif IMAGE_RENDERER == "imgcat":
if render_iterm(file_path, width, height):
return
elif IMAGE_RENDERER == "timg":
if render_timg(file_path, width, height):
return
elif IMAGE_RENDERER == "chafa":
if render_chafa_auto(file_path, width, height):
return
# Auto-detection / Fallback
if os.environ.get("KITTY_WINDOW_ID") or os.environ.get("GHOSTTY_BIN_DIR"):
if render_kitty(file_path, width, height, SCALE_UP):
return
if os.environ.get("TERM_PROGRAM") == "iTerm.app":
if render_iterm(file_path, width, height):
return
# Try standard tools in order of quality/preference
if render_kitty(file_path, width, height, SCALE_UP):
return
if render_sixel(file_path, width, height):
return
if render_timg(file_path, width, height):
return
if render_chafa_auto(file_path, width, height):
return
print("⚠️ No suitable image renderer found (icat, chafa, timg, img2sixel).")
def main():
if not SELECTED_TITLE:
print("No selection")
return
# Get the media data from cached search results
media = get_media_from_results(SELECTED_TITLE)
if not media:
print("Loading preview...")
return
term_width = get_terminal_width()
# Extract media information
title_obj = media.get("title", {})
title = (
title_obj.get("english")
or title_obj.get("romaji")
or title_obj.get("native")
or "Unknown"
)
# Show image if in image or full mode
if PREVIEW_MODE in ("image", "full"):
cover_image = media.get("coverImage", {}).get("large", "")
if cover_image:
# Ensure image cache directory exists
IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)
# Generate hash matching the preview worker pattern
# Use "anime-" prefix and hash of just the title (no KEY prefix for dynamic search)
hash_id = f"anime-{sha256(SELECTED_TITLE.encode('utf-8')).hexdigest()}"
image_file = IMAGE_CACHE_DIR / f"{hash_id}.png"
# Download image if not cached
if not image_file.exists():
download_image(cover_image, image_file)
# Try to render the image
if image_file.exists():
fzf_image_preview(str(image_file))
print() # Spacer
else:
print("🖼️ Loading image...")
print()
# Show text info if in text or full mode
if PREVIEW_MODE in ("text", "full"):
# Separator line
r, g, b = map(int, SEPARATOR_COLOR.split(","))
separator = f"\x1b[38;2;{r};{g};{b}m" + ("" * term_width) + "\x1b[0m"
print(separator, flush=True)
# Title centered
print(title.center(term_width))
# Extract data
status = media.get("status", "Unknown")
format_type = media.get("format", "Unknown")
episodes = media.get("episodes", "?")
duration = media.get("duration")
duration_str = f"{duration} min" if duration else "Unknown"
score = media.get("averageScore")
score_str = f"{score}/100" if score else "N/A"
favourites = format_number(media.get("favourites", 0))
popularity = format_number(media.get("popularity", 0))
genres = ", ".join(media.get("genres", [])[:5]) or "Unknown"
start_date = format_date(media.get("startDate"))
end_date = format_date(media.get("endDate"))
studios_list = media.get("studios", {}).get("nodes", [])
studios = ", ".join([s.get("name", "") for s in studios_list[:3]]) or "Unknown"
synonyms_list = media.get("synonyms", [])
synonyms = ", ".join(synonyms_list[:3]) or "N/A"
description = media.get("description", "No description available.")
description = strip_markdown(description)
# Print sections matching media_info.py structure
rows = [
("Score", score_str),
("Favorites", favourites),
("Popularity", popularity),
("Status", status),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Episodes", str(episodes)),
("Duration", duration_str),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Genres", genres),
("Format", format_type),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Start Date", start_date),
("End Date", end_date),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Studios", studios),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Synonyms", synonyms),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
print_rule(SEPARATOR_COLOR)
print(wrap_text(description, term_width))
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
except Exception as e:
print(f"Preview Error: {e}", file=sys.stderr)

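As their header comments note, these preview scripts are templates whose `{PLACEHOLDER}` variables are filled in by Python with `.replace()` before fzf executes them. The worker that performs the injection is not part of this diff; a minimal sketch of that step, with a hypothetical inline template and made-up values:

```python
# Hypothetical template snippet and values; the real template files and settings
# live in the preview worker, which is not shown in this compare view.
template = 'SEARCH_RESULTS_FILE = Path("{SEARCH_RESULTS_FILE}")\nPREVIEW_MODE = "{PREVIEW_MODE}"\n'

rendered = (
    template
    .replace("{SEARCH_RESULTS_FILE}", "/tmp/viu/search_results.json")
    .replace("{PREVIEW_MODE}", "full")
)

# The rendered script would then be written to a cache path that fzf runs as its preview command.
print(rendered)
```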

@@ -1,12 +1,11 @@
import sys
import shutil
from _ansi_utils import print_rule, print_table_row
from _ansi_utils import print_rule, print_table_row, get_terminal_width
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = shutil.get_terminal_size((80, 24)).columns
term_width = get_terminal_width()
# Print title centered
print("{TITLE}".center(term_width))


@@ -1,12 +1,17 @@
import sys
import shutil
from _ansi_utils import print_rule, print_table_row, strip_markdown, wrap_text
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = shutil.get_terminal_size((80, 24)).columns
term_width = get_terminal_width()
# Print title centered
print("{TITLE}".center(term_width))
@@ -69,7 +74,7 @@ for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Synonymns", "{SYNONYMNS}"),
("Synonyms", "{SYNONYMNS}"),
]
print_rule(SEPARATOR_COLOR)


@@ -1,22 +0,0 @@
#!/bin/sh
#
# Viu Airing Schedule Info Script Template
# This script formats and displays airing schedule details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Anime Title" "{ANIME_TITLE}"
draw_rule
print_kv "Total Episodes" "{TOTAL_EPISODES}"
print_kv "Upcoming Episodes" "{UPCOMING_EPISODES}"
draw_rule
echo "{C_KEY}Next Episodes:{RESET}"
echo
echo "{SCHEDULE_TABLE}" | fold -s -w "$WIDTH"
draw_rule


@@ -1,75 +0,0 @@
#!/bin/sh
#
# FZF Airing Schedule Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📅 Loading airing schedule..."
fi
fi


@@ -1,41 +0,0 @@
#!/bin/sh
#
# Viu Character Info Script Template
# This script formats and displays character details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Character Name" "{CHARACTER_NAME}"
if [ -n "{CHARACTER_NATIVE_NAME}" ] && [ "{CHARACTER_NATIVE_NAME}" != "N/A" ]; then
print_kv "Native Name" "{CHARACTER_NATIVE_NAME}"
fi
draw_rule
if [ -n "{CHARACTER_GENDER}" ] && [ "{CHARACTER_GENDER}" != "Unknown" ]; then
print_kv "Gender" "{CHARACTER_GENDER}"
fi
if [ -n "{CHARACTER_AGE}" ] && [ "{CHARACTER_AGE}" != "Unknown" ]; then
print_kv "Age" "{CHARACTER_AGE}"
fi
if [ -n "{CHARACTER_BLOOD_TYPE}" ] && [ "{CHARACTER_BLOOD_TYPE}" != "N/A" ]; then
print_kv "Blood Type" "{CHARACTER_BLOOD_TYPE}"
fi
if [ -n "{CHARACTER_BIRTHDAY}" ] && [ "{CHARACTER_BIRTHDAY}" != "N/A" ]; then
print_kv "Birthday" "{CHARACTER_BIRTHDAY}"
fi
if [ -n "{CHARACTER_FAVOURITES}" ] && [ "{CHARACTER_FAVOURITES}" != "0" ]; then
print_kv "Favorites" "{CHARACTER_FAVOURITES}"
fi
draw_rule
echo "{CHARACTER_DESCRIPTION}" | fold -s -w "$WIDTH"
draw_rule


@@ -1,130 +0,0 @@
#!/bin/sh
#
# FZF Character Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
# FIXME: Disabled since they cover the text perhaps its aspect ratio related or image format not sure
# if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
# image_file="{IMAGE_CACHE_DIR}{PATH_SEP}$hash.png"
# if [ -f "$image_file" ]; then
# fzf_preview "$image_file"
# echo # Add a newline for spacing
# fi
# fi
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "👤 Loading character details..."
fi
fi


@@ -1,315 +0,0 @@
#!/bin/bash
#
# FZF Dynamic Preview Script Template
#
# This script handles previews for dynamic search results by parsing the JSON
# search results file and extracting info for the selected item.
# The placeholders in curly braces are dynamically filled by Python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80}
IMAGE_RENDERER="{IMAGE_RENDERER}"
SEARCH_RESULTS_FILE="{SEARCH_RESULTS_FILE}"
IMAGE_CACHE_PATH="{IMAGE_CACHE_PATH}"
INFO_CACHE_PATH="{INFO_CACHE_PATH}"
PATH_SEP="{PATH_SEP}"
# Color codes injected by Python
C_TITLE="{C_TITLE}"
C_KEY="{C_KEY}"
C_VALUE="{C_VALUE}"
C_RULE="{C_RULE}"
RESET="{RESET}"
# Selected item from fzf
SELECTED_ITEM={}
generate_sha256() {
local input="$1"
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule() {
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
clean_html() {
echo "$1" | sed 's/<[^>]*>//g' | sed 's/&lt;/</g' | sed 's/&gt;/>/g' | sed 's/&amp;/\&/g' | sed 's/&quot;/"/g' | sed "s/&#39;/'/g"
}
format_date() {
local date_obj="$1"
if [ "$date_obj" = "null" ] || [ -z "$date_obj" ]; then
echo "N/A"
return
fi
# Extract year, month, day from the date object
if command -v jq >/dev/null 2>&1; then
year=$(echo "$date_obj" | jq -r '.year // "N/A"' 2>/dev/null || echo "N/A")
month=$(echo "$date_obj" | jq -r '.month // ""' 2>/dev/null || echo "")
day=$(echo "$date_obj" | jq -r '.day // ""' 2>/dev/null || echo "")
else
year=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('year', 'N/A'))" 2>/dev/null || echo "N/A")
month=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('month', ''))" 2>/dev/null || echo "")
day=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('day', ''))" 2>/dev/null || echo "")
fi
if [ "$year" = "N/A" ] || [ "$year" = "null" ]; then
echo "N/A"
elif [ -n "$month" ] && [ "$month" != "null" ] && [ -n "$day" ] && [ "$day" != "null" ]; then
echo "$day/$month/$year"
elif [ -n "$month" ] && [ "$month" != "null" ]; then
echo "$month/$year"
else
echo "$year"
fi
}
# If no selection or search results file doesn't exist, show placeholder
if [ -z "$SELECTED_ITEM" ] || [ ! -f "$SEARCH_RESULTS_FILE" ]; then
echo "${C_TITLE}Dynamic Search Preview${RESET}"
draw_rule
echo "Type to search for anime..."
echo "Results will appear here as you type."
echo
echo "DEBUG:"
echo "SELECTED_ITEM='$SELECTED_ITEM'"
echo "SEARCH_RESULTS_FILE='$SEARCH_RESULTS_FILE'"
if [ -f "$SEARCH_RESULTS_FILE" ]; then
echo "Search results file exists"
else
echo "Search results file missing"
fi
exit 0
fi
# Parse the search results JSON and find the matching item
if command -v jq >/dev/null 2>&1; then
MEDIA_DATA=$(cat "$SEARCH_RESULTS_FILE" | jq --arg anime_title "$SELECTED_ITEM" '
.data.Page.media[]? |
select((.title.english // .title.romaji // .title.native // "Unknown") == $anime_title )
' )
else
# Fallback to Python for JSON parsing
MEDIA_DATA=$(cat "$SEARCH_RESULTS_FILE" | python3 -c "
import json
import sys
try:
data = json.load(sys.stdin)
selected_item = '''$SELECTED_ITEM'''
if 'data' not in data or 'Page' not in data['data'] or 'media' not in data['data']['Page']:
sys.exit(1)
media_list = data['data']['Page']['media']
for media in media_list:
title = media.get('title', {})
english_title = title.get('english') or title.get('romaji') or title.get('native', 'Unknown')
year = media.get('startDate', {}).get('year', 'Unknown') if media.get('startDate') else 'Unknown'
status = media.get('status', 'Unknown')
genres = ', '.join(media.get('genres', [])[:3]) or 'Unknown'
display_format = f'{english_title} ({year}) [{status}] - {genres}'
# Debug output for matching
print(f"DEBUG: selected_item='{selected_item.strip()}' display_format='{display_format.strip()}'", file=sys.stderr)
if selected_item.strip() == display_format.strip():
json.dump(media, sys.stdout, indent=2)
sys.exit(0)
print(f"DEBUG: No match found for selected_item='{selected_item.strip()}'", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
sys.exit(1)
" 2>/dev/null)
fi
# If we couldn't find the media data, show error
if [ $? -ne 0 ] || [ -z "$MEDIA_DATA" ]; then
echo "${C_TITLE}Preview Error${RESET}"
draw_rule
echo "Could not load preview data for:"
echo "$SELECTED_ITEM"
echo
echo "DEBUG INFO:"
echo "Search results file: $SEARCH_RESULTS_FILE"
if [ -f "$SEARCH_RESULTS_FILE" ]; then
echo "File exists, size: $(wc -c < "$SEARCH_RESULTS_FILE") bytes"
echo "First few lines of search results:"
head -3 "$SEARCH_RESULTS_FILE" 2>/dev/null || echo "Cannot read file"
else
echo "Search results file does not exist"
fi
exit 0
fi
# Extract information from the media data
if command -v jq >/dev/null 2>&1; then
# Use jq for faster extraction
TITLE=$(echo "$MEDIA_DATA" | jq -r '.title.english // .title.romaji // .title.native // "Unknown"' 2>/dev/null || echo "Unknown")
STATUS=$(echo "$MEDIA_DATA" | jq -r '.status // "Unknown"' 2>/dev/null || echo "Unknown")
FORMAT=$(echo "$MEDIA_DATA" | jq -r '.format // "Unknown"' 2>/dev/null || echo "Unknown")
EPISODES=$(echo "$MEDIA_DATA" | jq -r '.episodes // "Unknown"' 2>/dev/null || echo "Unknown")
DURATION=$(echo "$MEDIA_DATA" | jq -r 'if .duration then "\(.duration) min" else "Unknown" end' 2>/dev/null || echo "Unknown")
SCORE=$(echo "$MEDIA_DATA" | jq -r 'if .averageScore then "\(.averageScore)/100" else "N/A" end' 2>/dev/null || echo "N/A")
FAVOURITES=$(echo "$MEDIA_DATA" | jq -r '.favourites // 0' 2>/dev/null | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta' || echo "0")
POPULARITY=$(echo "$MEDIA_DATA" | jq -r '.popularity // 0' 2>/dev/null | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta' || echo "0")
GENRES=$(echo "$MEDIA_DATA" | jq -r '(.genres[:5] // []) | join(", ") | if . == "" then "Unknown" else . end' 2>/dev/null || echo "Unknown")
DESCRIPTION=$(echo "$MEDIA_DATA" | jq -r '.description // "No description available."' 2>/dev/null || echo "No description available.")
# Get start and end dates as JSON objects
START_DATE_OBJ=$(echo "$MEDIA_DATA" | jq -c '.startDate' 2>/dev/null || echo "null")
END_DATE_OBJ=$(echo "$MEDIA_DATA" | jq -c '.endDate' 2>/dev/null || echo "null")
# Get cover image URL
COVER_IMAGE=$(echo "$MEDIA_DATA" | jq -r '.coverImage.large // ""' 2>/dev/null || echo "")
else
# Fallback to Python for extraction
TITLE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); title=data.get('title',{}); print(title.get('english') or title.get('romaji') or title.get('native', 'Unknown'))" 2>/dev/null || echo "Unknown")
STATUS=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('status', 'Unknown'))" 2>/dev/null || echo "Unknown")
FORMAT=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('format', 'Unknown'))" 2>/dev/null || echo "Unknown")
EPISODES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('episodes', 'Unknown'))" 2>/dev/null || echo "Unknown")
DURATION=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); duration=data.get('duration'); print(f'{duration} min' if duration else 'Unknown')" 2>/dev/null || echo "Unknown")
SCORE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); score=data.get('averageScore'); print(f'{score}/100' if score else 'N/A')" 2>/dev/null || echo "N/A")
FAVOURITES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\"{data.get('favourites', 0):,}\")" 2>/dev/null || echo "0")
POPULARITY=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\"{data.get('popularity', 0):,}\")" 2>/dev/null || echo "0")
GENRES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(', '.join(data.get('genres', [])[:5]))" 2>/dev/null || echo "Unknown")
DESCRIPTION=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('description', 'No description available.'))" 2>/dev/null || echo "No description available.")
# Get start and end dates
START_DATE_OBJ=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); json.dump(data.get('startDate'), sys.stdout)" 2>/dev/null || echo "null")
END_DATE_OBJ=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); json.dump(data.get('endDate'), sys.stdout)" 2>/dev/null || echo "null")
# Get cover image URL
COVER_IMAGE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); cover=data.get('coverImage',{}); print(cover.get('large', ''))" 2>/dev/null || echo "")
fi
# Format the dates
START_DATE=$(format_date "$START_DATE_OBJ")
END_DATE=$(format_date "$END_DATE_OBJ")
# Generate cache hash for this item (using selected item like regular preview)
CACHE_HASH=$(generate_sha256 "$SELECTED_ITEM")
# Try to show image if available
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
image_file="{IMAGE_CACHE_PATH}{PATH_SEP}${CACHE_HASH}.png"
# If image not cached and we have a URL, try to download it quickly
if [ ! -f "$image_file" ] && [ -n "$COVER_IMAGE" ]; then
if command -v curl >/dev/null 2>&1; then
# Quick download with timeout
curl -s -m 3 -L "$COVER_IMAGE" -o "$image_file" 2>/dev/null || rm -f "$image_file" 2>/dev/null
fi
fi
if [ -f "$image_file" ]; then
fzf_preview "$image_file"
else
echo "🖼️ Loading image..."
fi
echo
fi
# Display text info if configured
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
draw_rule
print_kv "Title" "$TITLE"
draw_rule
print_kv "Score" "$SCORE"
print_kv "Favourites" "$FAVOURITES"
print_kv "Popularity" "$POPULARITY"
print_kv "Status" "$STATUS"
draw_rule
print_kv "Episodes" "$EPISODES"
print_kv "Duration" "$DURATION"
print_kv "Format" "$FORMAT"
draw_rule
print_kv "Genres" "$GENRES"
print_kv "Start Date" "$START_DATE"
print_kv "End Date" "$END_DATE"
draw_rule
# Clean and display description
CLEAN_DESCRIPTION=$(clean_html "$DESCRIPTION")
echo "$CLEAN_DESCRIPTION" | fold -s -w "$WIDTH"
fi


@@ -1,31 +0,0 @@
#!/bin/sh
#
# Episode Preview Info Script Template
# This script formats and displays episode information in the FZF preview pane.
# Some values are injected by python those with '{name}' syntax using .replace()
draw_rule
echo "{TITLE}" | fold -s -w "$WIDTH"
draw_rule
print_kv "Duration" "{DURATION}"
print_kv "Status" "{STATUS}"
draw_rule
print_kv "Total Episodes" "{EPISODES}"
print_kv "Next Episode" "{NEXT_EPISODE}"
draw_rule
print_kv "Progress" "{USER_PROGRESS}"
print_kv "List Status" "{USER_STATUS}"
draw_rule
print_kv "Start Date" "{START_DATE}"
print_kv "End Date" "{END_DATE}"
draw_rule


@@ -1,54 +0,0 @@
#!/bin/sh
#
# Viu Preview Info Script Template
# This script formats and displays the textual information in the FZF preview pane.
# Some values are injected by python those with '{name}' syntax using .replace()
draw_rule
print_kv "Title" "{TITLE}"
draw_rule
# Emojis take up double the space
score_multiplier=1
if ! [ "{SCORE}" = "N/A" ]; then
score_multiplier=2
fi
print_kv "Score" "{SCORE}" $score_multiplier
print_kv "Favourites" "{FAVOURITES}"
print_kv "Popularity" "{POPULARITY}"
print_kv "Status" "{STATUS}"
draw_rule
print_kv "Episodes" "{EPISODES}"
print_kv "Next Episode" "{NEXT_EPISODE}"
print_kv "Duration" "{DURATION}"
draw_rule
print_kv "Genres" "{GENRES}"
print_kv "Format" "{FORMAT}"
draw_rule
print_kv "List Status" "{USER_STATUS}"
print_kv "Progress" "{USER_PROGRESS}"
draw_rule
print_kv "Start Date" "{START_DATE}"
print_kv "End Date" "{END_DATE}"
draw_rule
print_kv "Studios" "{STUDIOS}"
print_kv "Synonymns" "{SYNONYMNS}"
print_kv "Tags" "{TAGS}"
draw_rule
# Synopsis
echo "{SYNOPSIS}" | fold -s -w "$WIDTH"


@@ -1,147 +0,0 @@
#!/bin/sh
#
# FZF Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
dim=$((FZF_PREVIEW_COLUMNS - 1))x${FZF_PREVIEW_LINES}
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
# --- Helper function for printing a key-value pair, aligning the value to the right ---
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo "$value"| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
# --- Draw a rule across the screen ---
# TODO: figure out why this method does not work in fzf
draw_rule() {
local rule
# Generate the line of '─' characters, removing the trailing newline `tr` adds.
rule=$(printf '%*s' "$WIDTH" | tr ' ' '─' | tr -d '\n')
# Print the rule with colors and a single, clean newline.
printf "{C_RULE}%s{RESET}\\n" "$rule"
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
# Generate the same cache key that the Python worker uses
# {PREFIX} is used only on episode previews to make sure they are unique
title={}
hash=$(generate_sha256 "{PREFIX}$title")
#
# --- Display image if configured and the cached file exists ---
#
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
image_file="{IMAGE_CACHE_PATH}{PATH_SEP}$hash.png"
if [ -f "$image_file" ]; then
fzf_preview "$image_file"
else
echo "🖼️ Loading image..."
fi
echo # Add a newline for spacing
fi
# Display text info if configured and the cached file exists
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_PATH}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📝 Loading details..."
fi
fi


@@ -1,19 +0,0 @@
#!/bin/sh
#
# Viu Review Info Script Template
# This script formats and displays review details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Review By" "{REVIEWER_NAME}"
draw_rule
print_kv "Summary" "{REVIEW_SUMMARY}"
draw_rule
echo "{REVIEW_BODY}" | fold -s -w "$WIDTH"
draw_rule


@@ -1,75 +0,0 @@
#!/bin/sh
#
# FZF Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📝 Loading details..."
fi
fi


@@ -1,118 +0,0 @@
#!/bin/bash
#
# FZF Dynamic Search Script Template
#
# This script is a template for dynamic search functionality in fzf.
# The placeholders in curly braces, like {QUERY} are dynamically filled by Python using .replace()
# Configuration variables (injected by Python)
GRAPHQL_ENDPOINT="{GRAPHQL_ENDPOINT}"
CACHE_DIR="{CACHE_DIR}"
SEARCH_RESULTS_FILE="{SEARCH_RESULTS_FILE}"
AUTH_HEADER="{AUTH_HEADER}"
# Get the current query from fzf
QUERY="{{q}}"
# If query is empty, exit with empty results
if [ -z "$QUERY" ]; then
echo ""
exit 0
fi
# Create GraphQL variables
VARIABLES=$(cat <<EOF
{
"query": "$QUERY",
"type": "ANIME",
"per_page": 50,
"genre_not_in": ["Hentai"]
}
EOF
)
# The GraphQL query is injected here as a properly escaped string
GRAPHQL_QUERY='{GRAPHQL_QUERY}'
# Create the GraphQL request payload
PAYLOAD=$(cat <<EOF
{
"query": $GRAPHQL_QUERY,
"variables": $VARIABLES
}
EOF
)
# Make the GraphQL request and save raw results
if [ -n "$AUTH_HEADER" ]; then
RESPONSE=$(curl -s -X POST \
-H "Content-Type: application/json" \
-H "Authorization: $AUTH_HEADER" \
-d "$PAYLOAD" \
"$GRAPHQL_ENDPOINT")
else
RESPONSE=$(curl -s -X POST \
-H "Content-Type: application/json" \
-d "$PAYLOAD" \
"$GRAPHQL_ENDPOINT")
fi
# Check if the request was successful
if [ $? -ne 0 ] || [ -z "$RESPONSE" ]; then
echo "❌ Search failed"
exit 1
fi
# Save the raw response for later processing
echo "$RESPONSE" > "$SEARCH_RESULTS_FILE"
# Parse and display results
if command -v jq >/dev/null 2>&1; then
# Use jq for faster and more reliable JSON parsing
echo "$RESPONSE" | jq -r '
if .errors then
"❌ Search error: " + (.errors | tostring)
elif (.data.Page.media // []) | length == 0 then
"❌ No results found"
else
.data.Page.media[] | (.title.english // .title.romaji // .title.native // "Unknown")
end
' 2>/dev/null || echo "❌ Parse error"
else
# Fallback to Python for JSON parsing
echo "$RESPONSE" | python3 -c "
import json
import sys
try:
data = json.load(sys.stdin)
if 'errors' in data:
print('❌ Search error: ' + str(data['errors']))
sys.exit(1)
if 'data' not in data or 'Page' not in data['data'] or 'media' not in data['data']['Page']:
print('❌ No results found')
sys.exit(0)
media_list = data['data']['Page']['media']
if not media_list:
print('❌ No results found')
sys.exit(0)
for media in media_list:
title = media.get('title', {})
english_title = title.get('english') or title.get('romaji') or title.get('native', 'Unknown')
year = media.get('startDate', {}).get('year', 'Unknown') if media.get('startDate') else 'Unknown'
status = media.get('status', 'Unknown')
genres = ', '.join(media.get('genres', [])[:3]) or 'Unknown'
# Format: Title (Year) [Status] - Genres
print(f'{english_title} ({year}) [{status}] - {genres}')
except Exception as e:
print(f'❌ Parse error: {str(e)}')
sys.exit(1)
"
fi


@@ -244,12 +244,13 @@ def fzf_image_preview(file_path: str):
def fzf_text_info_render():
"""Renders the text-based info via the cached python script."""
import shutil
# Get terminal dimensions from FZF environment or fallback
cols, lines = get_terminal_dimensions()
# Print simple separator line
width = shutil.get_terminal_size((80, 24)).columns
# Print simple separator line with proper width
r, g, b = map(int, SEPARATOR_COLOR.split(","))
print(f"\x1b[38;2;{r};{g};{b}m" + "" * width + "\x1b[0m")
separator = f"\x1b[38;2;{r};{g};{b}m" + ("" * cols) + "\x1b[0m"
print(separator, flush=True)
if PREVIEW_MODE == "text" or PREVIEW_MODE == "full":
preview_info_path = INFO_CACHE_DIR / f"{hash_id}.py"


@@ -1,12 +1,17 @@
import sys
import shutil
from _ansi_utils import print_rule, print_table_row, strip_markdown, wrap_text
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = shutil.get_terminal_size((80, 24)).columns
term_width = get_terminal_width()
# Print title centered
print("{REVIEWER_NAME}".center(term_width))

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env python3
#
# FZF Dynamic Search Script Template
#
# This script is a template for dynamic search functionality in fzf.
# The placeholders in curly braces, like {GRAPHQL_ENDPOINT}, are dynamically
# filled by Python using .replace() at runtime.
import json
import sys
from pathlib import Path
from urllib import request
from urllib.error import URLError
# --- Template Variables (Injected by Python) ---
GRAPHQL_ENDPOINT = "{GRAPHQL_ENDPOINT}"
SEARCH_RESULTS_FILE = Path("{SEARCH_RESULTS_FILE}")
AUTH_HEADER = "{AUTH_HEADER}"
# The GraphQL query is injected as a properly escaped JSON string
GRAPHQL_QUERY = "{GRAPHQL_QUERY}"
# --- Get Query from fzf ---
# fzf passes the current query as the first argument when using --bind change:reload
QUERY = sys.argv[1] if len(sys.argv) > 1 else ""
# If query is empty, exit with empty results
if not QUERY.strip():
print("")
sys.exit(0)
def make_graphql_request(
endpoint: str, query: str, variables: dict, auth_token: str = ""
) -> dict | None:
"""
Make a GraphQL request to the specified endpoint.
Args:
endpoint: GraphQL API endpoint URL
query: GraphQL query string
variables: Query variables as a dictionary
auth_token: Optional authorization token (Bearer token)
Returns:
Response JSON as a dictionary, or None if request fails
"""
payload = {"query": query, "variables": variables}
headers = {"Content-Type": "application/json", "User-Agent": "viu/1.0"}
if auth_token:
headers["Authorization"] = auth_token
try:
req = request.Request(
endpoint,
data=json.dumps(payload).encode("utf-8"),
headers=headers,
method="POST",
)
with request.urlopen(req, timeout=10) as response:
return json.loads(response.read().decode("utf-8"))
except (URLError, json.JSONDecodeError, Exception) as e:
print(f"❌ Request failed: {e}", file=sys.stderr)
return None
def extract_title(media_item: dict) -> str:
"""
Extract the best available title from a media item.
Args:
media_item: Media object from GraphQL response
Returns:
Title string (english > romaji > native > "Unknown")
"""
title_obj = media_item.get("title", {})
return (
title_obj.get("english")
or title_obj.get("romaji")
or title_obj.get("native")
or "Unknown"
)
def main():
# Ensure parent directory exists
SEARCH_RESULTS_FILE.parent.mkdir(parents=True, exist_ok=True)
# Create GraphQL variables
variables = {
"query": QUERY,
"type": "ANIME",
"per_page": 50,
"genre_not_in": ["Hentai"],
}
# Make the GraphQL request
response = make_graphql_request(
GRAPHQL_ENDPOINT, GRAPHQL_QUERY, variables, AUTH_HEADER
)
if response is None:
print("❌ Search failed")
sys.exit(1)
# Save the raw response for later processing by dynamic_search.py
try:
with open(SEARCH_RESULTS_FILE, "w", encoding="utf-8") as f:
json.dump(response, f, ensure_ascii=False, indent=2)
except IOError as e:
print(f"❌ Failed to save results: {e}", file=sys.stderr)
sys.exit(1)
# Parse and display results
if "errors" in response:
print(f"❌ Search error: {response['errors']}")
sys.exit(1)
# Navigate the response structure
data = response.get("data", {})
page = data.get("Page", {})
media_list = page.get("media", [])
if not media_list:
print("❌ No results found")
sys.exit(0)
# Output titles for fzf (one per line)
for media in media_list:
title = extract_title(media)
print(title)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
print(f"❌ Unexpected error: {e}", file=sys.stderr)
sys.exit(1)
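
As a quick illustration of the title fallback chain above (assuming extract_title from this script is in scope; the media item is fabricated, not real AniList data):

sample = {"title": {"english": None, "romaji": "Sousou no Frieren", "native": "葬送のフリーレン"}}
print(extract_title(sample))  # english is None, so romaji wins -> "Sousou no Frieren"
empty = {"title": {}}
print(extract_title(empty))   # nothing available -> "Unknown"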

View File

@@ -1,3 +1,9 @@
from .cli import cli as run_cli
import sys
import os
if sys.platform.startswith("win"):
os.environ.setdefault("PYTHONUTF8", "1")
__all__ = ["run_cli"]
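
Worth noting: PYTHONUTF8 is only consulted at interpreter startup, so for the already-running process this is a no-op; it is, however, inherited by child interpreters such as the generated fzf preview/search scripts. A small sketch of that inherited behaviour (assuming the variable was not already set to something else):

import os
import subprocess
import sys

os.environ.setdefault("PYTHONUTF8", "1")
# The current interpreter keeps whatever mode it started with, but children inherit the variable:
out = subprocess.run(
    [sys.executable, "-c", "import sys; print(sys.stdout.encoding)"],
    capture_output=True, text=True,
)
print(out.stdout.strip())  # -> utf-8 (UTF-8 mode enabled in the child)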

View File

@@ -1,4 +1,5 @@
import logging
import shutil
import sys
from typing import TYPE_CHECKING
@@ -109,12 +110,103 @@ def cli(ctx: click.Context, **options: "Unpack[Options]"):
)
ctx.obj = config
if config.general.welcome_screen:
import time
from ..core.constants import APP_CACHE_DIR, USER_NAME, SUPPORT_PROJECT_URL
last_welcomed_at_file = APP_CACHE_DIR / ".last_welcome"
should_welcome = False
if last_welcomed_at_file.exists():
try:
last_welcomed_at = float(
last_welcomed_at_file.read_text(encoding="utf-8")
)
# runs once a month
if (time.time() - last_welcomed_at) > 30 * 24 * 3600:
should_welcome = True
except Exception as e:
logger.warning(f"Failed to read welcome screen timestamp: {e}")
else:
should_welcome = True
if should_welcome:
last_welcomed_at_file.write_text(str(time.time()), encoding="utf-8")
from rich.prompt import Confirm
if Confirm.ask(f"""\
[green]How are you, {USER_NAME} 🙂?
If you enjoy the project and would like to support it, you can buy me a coffee at {SUPPORT_PROJECT_URL}.
Would you like to open the support page? Select yes to continue — otherwise, enjoy your terminal-anime browsing experience 😁.[/]
You can disable this message by turning off the welcome_screen option in the config. It only appears once a month.
"""):
from webbrowser import open
open(SUPPORT_PROJECT_URL)
if config.general.show_new_release:
import time
from ..core.constants import APP_CACHE_DIR
last_release_file = APP_CACHE_DIR / ".last_release"
should_print_release_notes = False
if last_release_file.exists():
last_release = last_release_file.read_text(encoding="utf-8")
current_version = list(map(int, __version__.replace("v", "").split(".")))
last_saved_version = list(
map(int, last_release.replace("v", "").split("."))
)
if (
(current_version[0] > last_saved_version[0])
or (
current_version[1] > last_saved_version[1]
and current_version[0] == last_saved_version[0]
)
or (
current_version[2] > last_saved_version[2]
and current_version[0] == last_saved_version[0]
and current_version[1] == last_saved_version[1]
)
):
should_print_release_notes = True
else:
should_print_release_notes = True
if should_print_release_notes:
last_release_file.write_text(__version__, encoding="utf-8")
from .service.feedback import FeedbackService
from .utils.update import check_for_updates, print_release_json, update_app
from rich.prompt import Confirm
feedback = FeedbackService(config)
feedback.info("Getting release notes...")
is_latest, release_json = check_for_updates()
if Confirm.ask(
"Would you also like to update your config with the latest options and config notes"
):
import subprocess
_cli_cmd_name = "viu" if not shutil.which("viu-media") else "viu-media"
cmd = [_cli_cmd_name, "config", "--update"]
print(f"running '{' '.join(cmd)}'...")
subprocess.run(cmd)
if is_latest:
print_release_json(release_json)
else:
print_release_json(release_json)
print("It seems theres another update waiting for you as well 😁")
click.pause("Press Any Key To Proceed...")
if config.general.check_for_updates:
import time
from ..core.constants import APP_CACHE_DIR
last_updated_at_file = APP_CACHE_DIR / "last_update"
last_updated_at_file = APP_CACHE_DIR / ".last_update"
should_check_for_update = False
if last_updated_at_file.exists():
try:

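
The three-branch major/minor/patch comparison above is equivalent to comparing the parsed version tuples lexicographically (assuming plain "vX.Y.Z" strings); a compact sketch:

# Equivalent check using tuple comparison (sketch; assumes three-part "vX.Y.Z" strings).
def is_newer(current: str, last_saved: str) -> bool:
    parse = lambda v: tuple(map(int, v.replace("v", "").split(".")))
    return parse(current) > parse(last_saved)

print(is_newer("v3.3.3", "v3.3.2"))  # True  (patch bump)
print(is_newer("v3.3.3", "v3.4.0"))  # False (older minor)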
View File

@@ -2,6 +2,7 @@ from typing import TYPE_CHECKING
import click
from ...core.config import AppConfig
from ...core.exceptions import ViuError
from ..utils.completion import anime_titles_shell_complete
@@ -49,6 +50,7 @@ def search(config: AppConfig, **options: "Unpack[Options]"):
SearchParams,
)
from ...libs.provider.anime.provider import create_provider
from viu_media.core.utils.normalizer import normalize_title
from ...libs.selectors.selector import create_selector
if not options["anime_title"]:
@@ -67,7 +69,10 @@ def search(config: AppConfig, **options: "Unpack[Options]"):
with feedback.progress(f"Fetching anime search results for {anime_title}"):
search_results = provider.search(
SearchParams(
query=anime_title, translation_type=config.stream.translation_type
query=normalize_title(
anime_title, config.general.provider.value, True
).lower(),
translation_type=config.stream.translation_type,
)
)
if not search_results:

View File

@@ -10,7 +10,13 @@ from pydantic.fields import ComputedFieldInfo, FieldInfo
from pydantic_core import PydanticUndefined
from ...core.config import AppConfig
from ...core.constants import APP_ASCII_ART, CLI_NAME, DISCORD_INVITE, REPO_HOME
from ...core.constants import (
APP_ASCII_ART,
CLI_NAME,
DISCORD_INVITE,
REPO_HOME,
SUPPORT_PROJECT_URL,
)
# The header for the config file.
config_asci = "\n".join(
@@ -38,6 +44,9 @@ CONFIG_FOOTER = f"""
# Also join the discord server
# where the anime tech community lives :)
# {DISCORD_INVITE}
# If you like the project and are able to support it, please consider buying me a coffee at {SUPPORT_PROJECT_URL}.
# If you would like to connect with me, join the discord server; from there you can dm me for hackathons, or even to tell me a joke 😂
# Otherwise enjoy your terminal anime browser experience 😁
#
# ==============================================================================
""".lstrip()

View File

@@ -1,5 +1,7 @@
import json
import logging
import sys
from pathlib import Path
from .....core.constants import APP_CACHE_DIR, SCRIPTS_DIR
from .....libs.media_api.params import MediaSearchParams
@@ -8,12 +10,10 @@ from ...state import InternalDirective, MediaApiState, MenuName, State
logger = logging.getLogger(__name__)
SEARCH_CACHE_DIR = APP_CACHE_DIR / "search"
SEARCH_CACHE_DIR = APP_CACHE_DIR / "previews" / "dynamic-search"
SEARCH_RESULTS_FILE = SEARCH_CACHE_DIR / "current_search_results.json"
FZF_SCRIPTS_DIR = SCRIPTS_DIR / "fzf"
SEARCH_TEMPLATE_SCRIPT = (FZF_SCRIPTS_DIR / "search.template.sh").read_text(
encoding="utf-8"
)
SEARCH_TEMPLATE_SCRIPT = (FZF_SCRIPTS_DIR / "search.py").read_text(encoding="utf-8")
@session.menu
@@ -29,8 +29,8 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
from .....libs.media_api.anilist import gql
search_query = gql.SEARCH_MEDIA.read_text(encoding="utf-8")
# Properly escape the GraphQL query for JSON
search_query_escaped = json.dumps(search_query)
# Escape the GraphQL query as a JSON string literal for Python script
search_query_json = json.dumps(search_query).replace('"', "")
# Prepare the search script
auth_header = ""
@@ -42,15 +42,24 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
replacements = {
"GRAPHQL_ENDPOINT": "https://graphql.anilist.co",
"GRAPHQL_QUERY": search_query_escaped,
"CACHE_DIR": str(SEARCH_CACHE_DIR),
"SEARCH_RESULTS_FILE": str(SEARCH_RESULTS_FILE),
"GRAPHQL_QUERY": search_query_json,
"SEARCH_RESULTS_FILE": SEARCH_RESULTS_FILE.as_posix(),
"AUTH_HEADER": auth_header,
}
for key, value in replacements.items():
search_command = search_command.replace(f"{{{key}}}", str(value))
# Write the filled template to a cache file
search_script_file = SEARCH_CACHE_DIR / "search.py"
search_script_file.write_text(search_command, encoding="utf-8")
# Make the search script executable by calling it with python3
# fzf will pass the query as {q} which becomes the first argument
search_command_final = (
f"{Path(sys.executable).as_posix()} {search_script_file.as_posix()} {{q}}"
)
try:
# Prepare preview functionality
preview_command = None
@@ -62,13 +71,13 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
choice = ctx.selector.search(
prompt="Search Anime",
search_command=search_command,
search_command=search_command_final,
preview=preview_command,
)
else:
choice = ctx.selector.search(
prompt="Search Anime",
search_command=search_command,
search_command=search_command_final,
)
except NotImplementedError:
feedback.error("Dynamic search is not supported by your current selector")
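
For context, the `{q}` token in search_command_final is fzf's own query placeholder; a hypothetical selector would hand the command to fzf roughly like this (the fzf flags, prompt, and path below are illustrative, not the project's actual selector code):

# Hypothetical illustration only: fzf re-runs the reload command on every keystroke,
# substituting {q} with the current query string.
import subprocess
import sys

search_cmd = f"{sys.executable} /path/to/cache/search.py {{q}}"  # placeholder path
subprocess.run(
    ["fzf", "--disabled", "--prompt", "Search Anime> ", "--bind", f"change:reload({search_cmd})"]
)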

View File

@@ -28,7 +28,9 @@ def provider_search(ctx: Context, state: State) -> State | InternalDirective:
provider_search_results = provider.search(
SearchParams(
query=normalize_title(media_title, config.general.provider.value, True),
query=normalize_title(
media_title, config.general.provider.value, True
).lower(),
translation_type=config.stream.translation_type,
)
)

View File

@@ -1,4 +1,5 @@
import logging
from pathlib import Path
import re
from hashlib import sha256
import sys
@@ -9,7 +10,7 @@ import httpx
from viu_media.core.utils import formatter
from ...core.config import AppConfig
from ...core.constants import APP_CACHE_DIR, PLATFORM, SCRIPTS_DIR
from ...core.constants import APP_CACHE_DIR, SCRIPTS_DIR
from ...core.utils.file import AtomicWriter
from ...libs.media_api.types import (
AiringScheduleResult,
@@ -17,7 +18,6 @@ from ...libs.media_api.types import (
MediaItem,
MediaReview,
)
from . import ansi
from .preview_workers import PreviewWorkerManager
@@ -127,7 +127,9 @@ INFO_CACHE_DIR = PREVIEWS_CACHE_DIR / "info"
FZF_SCRIPTS_DIR = SCRIPTS_DIR / "fzf"
TEMPLATE_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "preview.py").read_text(encoding="utf-8")
DYNAMIC_PREVIEW_SCRIPT = ""
DYNAMIC_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "dynamic_preview.py").read_text(
encoding="utf-8"
)
EPISODE_PATTERN = re.compile(r"^Episode\s+(\d+)\s-\s.*")
@@ -307,8 +309,8 @@ def get_anime_preview(
# Format the template with the dynamic values
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"INFO_CACHE_DIR": INFO_CACHE_DIR.as_posix(),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
@@ -324,7 +326,9 @@ def get_anime_preview(
preview_file = PREVIEWS_CACHE_DIR / "search-result-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
preview_script_final = (
f"{Path(sys.executable).as_posix()} {preview_file.as_posix()} {{}}"
)
return preview_script_final
@@ -365,8 +369,8 @@ def get_episode_preview(
# Format the template with the dynamic values
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"INFO_CACHE_DIR": INFO_CACHE_DIR.as_posix(),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
@@ -382,7 +386,9 @@ def get_episode_preview(
preview_file = PREVIEWS_CACHE_DIR / "episode-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
preview_script_final = (
f"{Path(sys.executable).as_posix()} {preview_file.as_posix()} {{}}"
)
return preview_script_final
@@ -411,8 +417,8 @@ def get_character_preview(choice_map: Dict[str, Character], config: AppConfig) -
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"INFO_CACHE_DIR": INFO_CACHE_DIR.as_posix(),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
@@ -428,7 +434,9 @@ def get_character_preview(choice_map: Dict[str, Character], config: AppConfig) -
preview_file = PREVIEWS_CACHE_DIR / "character-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
preview_script_final = (
f"{Path(sys.executable).as_posix()} {preview_file.as_posix()} {{}}"
)
return preview_script_final
@@ -457,8 +465,8 @@ def get_review_preview(choice_map: Dict[str, MediaReview], config: AppConfig) ->
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"INFO_CACHE_DIR": INFO_CACHE_DIR.as_posix(),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
@@ -474,7 +482,9 @@ def get_review_preview(choice_map: Dict[str, MediaReview], config: AppConfig) ->
preview_file = PREVIEWS_CACHE_DIR / "review-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
preview_script_final = (
f"{Path(sys.executable).as_posix()} {preview_file.as_posix()} {{}}"
)
return preview_script_final
@@ -505,8 +515,8 @@ def get_airing_schedule_preview(
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"INFO_CACHE_DIR": INFO_CACHE_DIR.as_posix(),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
@@ -522,7 +532,7 @@ def get_airing_schedule_preview(
preview_file = PREVIEWS_CACHE_DIR / "airing-schedule-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
# preview_script_final = f"{sys.executable} {preview_file} {{}}"
# NOTE: disabled cause not very useful
return ""
@@ -534,18 +544,32 @@ def get_dynamic_anime_preview(config: AppConfig) -> str:
This is different from regular anime preview because:
1. We don't have media items upfront
2. The preview needs to work with search results as they come in
3. Preview is handled entirely in shell by parsing JSON results
3. Preview script dynamically loads data from search results JSON
Args:
config: Application configuration
Returns:
Preview script content for fzf dynamic search
Preview script command for fzf dynamic search
"""
# Ensure cache directories exist
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
_ensure_ansi_utils_in_cache()
search_cache_dir = APP_CACHE_DIR / "previews" / "dynamic-search"
search_cache_dir.mkdir(parents=True, exist_ok=True)
source = FZF_SCRIPTS_DIR / "_ansi_utils.py"
dest = search_cache_dir / "_ansi_utils.py"
if source.exists() and (
not dest.exists() or source.stat().st_mtime > dest.stat().st_mtime
):
try:
import shutil
shutil.copy2(source, dest)
logger.debug(f"Copied _ansi_utils.py to {INFO_CACHE_DIR}")
except Exception as e:
logger.warning(f"Failed to copy _ansi_utils.py to cache: {e}")
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
@@ -553,33 +577,31 @@ def get_dynamic_anime_preview(config: AppConfig) -> str:
# Use the dynamic preview script template
preview_script = DYNAMIC_PREVIEW_SCRIPT
search_cache_dir = APP_CACHE_DIR / "search"
search_results_file = search_cache_dir / "current_search_results.json"
# Prepare values to inject into the template
path_sep = "\\" if PLATFORM == "win32" else "/"
# Format the template with the dynamic values
# Prepare replacements for the template
replacements = {
"SEARCH_RESULTS_FILE": search_results_file.as_posix(),
"IMAGE_CACHE_DIR": IMAGES_CACHE_DIR.as_posix(),
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_PATH": str(IMAGES_CACHE_DIR),
"INFO_CACHE_PATH": str(INFO_CACHE_DIR),
"PATH_SEP": path_sep,
"IMAGE_RENDERER": config.general.image_renderer,
"SEARCH_RESULTS_FILE": str(search_results_file),
# Color codes
"C_TITLE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_KEY": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_VALUE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_RULE": ansi.get_true_fg(SEPARATOR_COLOR, bold=True),
"RESET": ansi.RESET,
"SCALE_UP": " --scale-up" if config.general.preview_scale_up else "",
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
# Write the preview script to cache
preview_file = search_cache_dir / "dynamic-search-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
# Return the command to execute the preview script
preview_script_final = (
f"{Path(sys.executable).as_posix()} {preview_file.as_posix()} {{}}"
)
return preview_script_final
def _get_preview_manager() -> PreviewWorkerManager:

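
The switch from str() to as_posix() above matters because these paths are spliced into generated Python source with .replace(); raw Windows backslashes would then be re-read as escape sequences. A small illustration (using PureWindowsPath so it behaves the same on any OS; the path itself is made up):

from pathlib import PureWindowsPath

p = PureWindowsPath(r"C:\Users\me\.cache\viu\previews")
print(str(p))        # C:\Users\me\.cache\viu\previews -> "\U" and "\v" become escapes if pasted into Python source
print(p.as_posix())  # C:/Users/me/.cache/viu/previews -> survives the template .replace() untouched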
View File

@@ -2,6 +2,7 @@ from ..constants import APP_DATA_DIR, DEFAULTS_DIR, PLATFORM, USER_VIDEOS_DIR
from ..utils import detect
# GeneralConfig
GENERAL_WELCOME_SCREEN = True
GENERAL_PYGMENT_STYLE = "github-dark"
GENERAL_PREFERRED_SPINNER = "smiley"
GENERAL_API_CLIENT = "anilist"
@@ -32,6 +33,7 @@ def GENERAL_IMAGE_RENDERER():
GENERAL_MANGA_VIEWER = "feh"
GENERAL_CHECK_FOR_UPDATES = True
GENERAL_SHOW_NEW_RELEASE = True
GENERAL_UPDATE_CHECK_INTERVAL = 12
GENERAL_CACHE_REQUESTS = True
GENERAL_MAX_CACHE_LIFETIME = "03:00:00"

View File

@@ -1,5 +1,6 @@
# GeneralConfig
GENERAL_WELCOME_SCREEN = "Whether to enable the welcome screen, which runs once a month"
GENERAL_PYGMENT_STYLE = "The pygment style to use"
GENERAL_PREFERRED_SPINNER = "The spinner to use"
GENERAL_API_CLIENT = "The media database API to use (e.g., 'anilist', 'jikan')."
@@ -24,6 +25,9 @@ GENERAL_IMAGE_RENDERER = (
)
GENERAL_MANGA_VIEWER = "The external application to use for viewing manga pages."
GENERAL_CHECK_FOR_UPDATES = "Automatically check for new versions of Viu on startup."
GENERAL_SHOW_NEW_RELEASE = (
"Whether to show release notes after every update when running the new version"
)
GENERAL_UPDATE_CHECK_INTERVAL = "The interval in hours to check for updates"
GENERAL_CACHE_REQUESTS = (
"Enable caching of network requests to speed up subsequent operations."

View File

@@ -156,6 +156,9 @@ class GeneralConfig(BaseModel):
default=defaults.GENERAL_API_CLIENT,
description=desc.GENERAL_API_CLIENT,
)
welcome_screen: bool = Field(
default=defaults.GENERAL_WELCOME_SCREEN, description=desc.GENERAL_WELCOME_SCREEN
)
provider: ProviderName = Field(
default=ProviderName.ALLANIME,
description=desc.GENERAL_PROVIDER,
@@ -192,6 +195,10 @@ class GeneralConfig(BaseModel):
default=defaults.GENERAL_CHECK_FOR_UPDATES,
description=desc.GENERAL_CHECK_FOR_UPDATES,
)
show_new_release: bool = Field(
default=defaults.GENERAL_SHOW_NEW_RELEASE,
description=desc.GENERAL_SHOW_NEW_RELEASE,
)
update_check_interval: float = Field(
default=defaults.GENERAL_UPDATE_CHECK_INTERVAL,
description=desc.GENERAL_UPDATE_CHECK_INTERVAL,

View File

@@ -9,7 +9,8 @@ CLI_NAME_LOWER = "viu"
PROJECT_NAME = "viu-media"
APP_NAME = os.environ.get(f"{CLI_NAME}_APP_NAME", CLI_NAME_LOWER)
USER_NAME = os.environ.get("USERNAME", "User")
USER_NAME = os.environ.get("USERNAME", os.environ.get("USER", "User"))
__version__ = metadata.version("viu_media")
@@ -85,3 +86,4 @@ USER_VIDEOS_DIR.mkdir(parents=True, exist_ok=True)
USER_CONFIG = APP_DATA_DIR / "config.toml"
LOG_FILE = LOG_FOLDER / "app.log"
SUPPORT_PROJECT_URL = "https://buymeacoffee.com/benexl"

View File

@@ -9,6 +9,7 @@ class DownloadParams:
episode_title: str
silent: bool
progress_hooks: list[Callable] = field(default_factory=list)
logger: object | None = None
vid_format: str = "best"
force_unknown_ext: bool = False
verbose: bool = False

View File

@@ -91,6 +91,7 @@ class YtDLPDownloader(BaseDownloader):
else tuple(),
"progress_hooks": params.progress_hooks,
"nocheckcertificate": params.no_check_certificate,
"logger": params.logger,
}
opts = opts
if params.force_ffmpeg or params.hls_use_mpegts or params.hls_use_h264:

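
yt-dlp's "logger" option accepts any object exposing debug/warning/error methods, which a standard logging.Logger satisfies, so the new params.logger field lets the app redirect or silence yt-dlp's console output; a minimal sketch (the logger name is illustrative):

import logging

# Route yt-dlp's internal messages into the application's logging setup.
dl_logger = logging.getLogger("viu.downloader")
dl_logger.addHandler(logging.NullHandler())  # or a FileHandler pointed at the app log

ydl_opts = {"logger": dl_logger}  # merged with the rest of the options built above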
View File

@@ -11,4 +11,7 @@ REPLACEMENT_WORDS = {"Season ": "", "Cour": "Part"}
# Server Specific
AVAILABLE_VIDEO_QUALITY = ["1080", "720", "480"]
VIDEO_INFO_REGEX = re.compile(r"window.video\s*=\s*(\{[^\}]*\})")
VIDEO_INFO_CLEAN_REGEX = re.compile(r'(?<!["\'])(\b\w+\b)(?=\s*:)')
DOWNLOAD_FILENAME_REGEX = re.compile(r"[?&]filename=([^&]+)")
QUALITY_REGEX = re.compile(r"/(\d{3,4}p)")
DOWNLOAD_URL_REGEX = re.compile(r"window.downloadUrl\s*=\s*'([^']*)'")

View File

@@ -0,0 +1,50 @@
import logging
from .constants import (
DOWNLOAD_FILENAME_REGEX,
DOWNLOAD_URL_REGEX,
QUALITY_REGEX,
VIDEO_INFO_CLEAN_REGEX,
VIDEO_INFO_REGEX,
)
logger = logging.getLogger(__name__)
def extract_server_info(html_content: str, episode_title: str | None) -> dict | None:
"""
Extracts server information from the VixCloud/AnimeUnity embed page.
Handles extraction from both window.video object and download URL.
"""
video_info = VIDEO_INFO_REGEX.search(html_content)
download_url_match = DOWNLOAD_URL_REGEX.search(html_content)
if not (download_url_match and video_info):
return None
info_str = VIDEO_INFO_CLEAN_REGEX.sub(r'"\1"', video_info.group(1))
# Use eval context for JS constants
ctx = {"null": None, "true": True, "false": False}
try:
info = eval(info_str, ctx)
except Exception as e:
logger.error(f"Failed to parse JS object: {e}")
return None
download_url = download_url_match.group(1)
info["link"] = download_url
# Extract metadata from download URL if missing in window.video
if filename_match := DOWNLOAD_FILENAME_REGEX.search(download_url):
info["name"] = filename_match.group(1)
else:
info["name"] = f"{episode_title or 'Unknown'}"
if quality_match := QUALITY_REGEX.search(download_url):
# "720p" -> 720
info["quality"] = int(quality_match.group(1)[:-1])
else:
info["quality"] = 0 # Fallback
return info
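
A quick check of the fallback path with a fabricated embed snippet (the markup, IDs, and URL below are invented, not real VixCloud output; assumes extract_server_info from this module is in scope):

html = (
    "window.video = {id: 273, name: null};\n"
    "window.downloadUrl = 'https://cdn.example/download/720p/ep-01.mp4?filename=Ep-01.mp4';"
)
info = extract_server_info(html, episode_title="Episode 1")
# name and quality are recovered from the download URL since window.video no longer carries them:
# {'id': 273, 'name': 'Ep-01.mp4',
#  'link': 'https://cdn.example/download/720p/ep-01.mp4?filename=Ep-01.mp4', 'quality': 720}
print(info)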

View File

@@ -99,7 +99,11 @@ def map_to_server(
translation_type=MediaTranslationType(translation_type),
mp4=True,
)
for quality in AVAILABLE_VIDEO_QUALITY
for quality in sorted(
list(set(AVAILABLE_VIDEO_QUALITY + [str(info["quality"])])),
key=lambda x: int(x),
reverse=True,
)
if int(quality) <= info["quality"]
],
episode_title=episode.title,

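
To make the new quality list concrete: with AVAILABLE_VIDEO_QUALITY = ["1080", "720", "480"] and the extractor reporting 720, the comprehension only keeps qualities at or below what the server actually offers; a worked sketch:

AVAILABLE_VIDEO_QUALITY = ["1080", "720", "480"]
info = {"quality": 720}

qualities = [
    q
    for q in sorted(set(AVAILABLE_VIDEO_QUALITY + [str(info["quality"])]), key=int, reverse=True)
    if int(q) <= info["quality"]
]
print(qualities)  # ['720', '480'] -- 1080 is dropped because the server only offers 720

info = {"quality": 1440}  # a non-standard quality reported by the server is kept as well
# -> ['1440', '1080', '720', '480']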
View File

@@ -8,12 +8,11 @@ from ..types import Anime, AnimeEpisodeInfo, SearchResult, SearchResults
from ..utils.debug import debug_provider
from .constants import (
ANIMEUNITY_BASE,
DOWNLOAD_URL_REGEX,
MAX_TIMEOUT,
REPLACEMENT_WORDS,
TOKEN_REGEX,
VIDEO_INFO_REGEX,
)
from .extractor import extract_server_info
from .mappers import (
map_to_anime_result,
map_to_search_result,
@@ -158,14 +157,10 @@ class AnimeUnity(BaseAnimeProvider):
video_response = self.client.get(url=response.text.strip(), timeout=MAX_TIMEOUT)
video_response.raise_for_status()
video_info = VIDEO_INFO_REGEX.search(video_response.text)
download_url_match = DOWNLOAD_URL_REGEX.search(video_response.text)
if not (download_url_match and video_info):
if not (info := extract_server_info(video_response.text, episode.title)):
logger.error(f"Failed to extract video info for episode {episode.id}")
return None
info = eval(video_info.group(1).replace("null", "None"))
info["link"] = download_url_match.group(1)
yield map_to_server(episode, info, params.translation_type)