Merge pull request #168 from viu-media/feature/preview-scripts-rewrite-to-python

Benedict Xavier
2025-12-02 17:52:21 +03:00
committed by GitHub
25 changed files with 1611 additions and 1252 deletions

View File

@@ -0,0 +1,202 @@
"""
ANSI utilities for FZF preview scripts.
Lightweight stdlib-only utilities to replace Rich dependency in preview scripts.
Provides RGB color formatting, table rendering, and markdown stripping.
"""
import os
import re
import shutil
import textwrap
import unicodedata
def get_terminal_width() -> int:
"""
Get terminal width, prioritizing FZF preview environment variables.
Returns:
Terminal width in columns
"""
fzf_cols = os.environ.get("FZF_PREVIEW_COLUMNS")
if fzf_cols:
return int(fzf_cols)
return shutil.get_terminal_size((80, 24)).columns
def display_width(text: str) -> int:
"""
Calculate the actual display width of text, accounting for wide characters.
Args:
text: Text to measure
Returns:
Display width in terminal columns
"""
width = 0
for char in text:
# East Asian Width property: 'F' (Fullwidth) and 'W' (Wide) take 2 columns
if unicodedata.east_asian_width(char) in ("F", "W"):
width += 2
else:
width += 1
return width
def rgb_color(r: int, g: int, b: int, text: str, bold: bool = False) -> str:
"""
Format text with RGB color using ANSI escape codes.
Args:
r: Red component (0-255)
g: Green component (0-255)
b: Blue component (0-255)
text: Text to colorize
bold: Whether to make text bold
Returns:
ANSI-escaped colored text
"""
color_code = f"\x1b[38;2;{r};{g};{b}m"
bold_code = "\x1b[1m" if bold else ""
reset = "\x1b[0m"
return f"{color_code}{bold_code}{text}{reset}"
def parse_color(color_csv: str) -> tuple[int, int, int]:
"""
Parse RGB color from comma-separated string.
Args:
color_csv: Color as 'R,G,B' string
Returns:
Tuple of (r, g, b) integers
"""
parts = color_csv.split(",")
return int(parts[0]), int(parts[1]), int(parts[2])
def print_rule(sep_color: str) -> None:
"""
Print a horizontal rule line.
Args:
sep_color: Color as 'R,G,B' string
"""
width = get_terminal_width()
r, g, b = parse_color(sep_color)
print(rgb_color(r, g, b, "─" * width))
def print_table_row(
key: str, value: str, header_color: str, key_width: int, value_width: int
) -> None:
"""
Print a two-column table row with left-aligned key and right-aligned value.
Args:
key: Left column text (header/key)
value: Right column text (value)
header_color: Color for key as 'R,G,B' string
key_width: Requested key column width (currently unused; the layout is derived from the terminal width)
value_width: Requested value column width (currently unused)
"""
r, g, b = parse_color(header_color)
key_styled = rgb_color(r, g, b, key, bold=True)
# Get actual terminal width
term_width = get_terminal_width()
# Calculate display widths accounting for wide characters
key_display_width = display_width(key)
# Calculate actual value width based on terminal and key display width
actual_value_width = max(20, term_width - key_display_width - 2)
# Wrap value if it's too long (use character count, not display width for wrapping)
value_lines = textwrap.wrap(str(value), width=actual_value_width) if value else [""]
if not value_lines:
value_lines = [""]
# Print first line with properly aligned value
first_line = value_lines[0]
first_line_display_width = display_width(first_line)
# Use manual spacing to right-align based on display width
spacing = term_width - key_display_width - first_line_display_width - 2
if spacing > 0:
print(f"{key_styled} {' ' * spacing}{first_line}")
else:
print(f"{key_styled} {first_line}")
# Print remaining wrapped lines (left-aligned, indented)
for line in value_lines[1:]:
print(f"{' ' * (key_display_width + 2)}{line}")
def strip_markdown(text: str) -> str:
"""
Strip markdown formatting from text.
Removes:
- Headers (# ## ###)
- Bold (**text** or __text__)
- Italic (*text* or _text_)
- Links ([text](url))
- Code blocks (```code```)
- Inline code (`code`)
Args:
text: Markdown-formatted text
Returns:
Plain text with markdown removed
"""
if not text:
return ""
# Remove code blocks first
text = re.sub(r"```[\s\S]*?```", "", text)
# Remove inline code
text = re.sub(r"`([^`]+)`", r"\1", text)
# Remove headers
text = re.sub(r"^#{1,6}\s+", "", text, flags=re.MULTILINE)
# Remove bold (** or __)
text = re.sub(r"\*\*(.+?)\*\*", r"\1", text)
text = re.sub(r"__(.+?)__", r"\1", text)
# Remove italic (* or _)
text = re.sub(r"\*(.+?)\*", r"\1", text)
text = re.sub(r"_(.+?)_", r"\1", text)
# Remove links, keep text
text = re.sub(r"\[(.+?)\]\(.+?\)", r"\1", text)
# Remove images
text = re.sub(r"!\[.*?\]\(.+?\)", "", text)
return text.strip()
def wrap_text(text: str, width: int | None = None) -> str:
"""
Wrap text to terminal width.
Args:
text: Text to wrap
width: Width to wrap to (defaults to terminal width)
Returns:
Wrapped text
"""
if width is None:
width = get_terminal_width()
return textwrap.fill(text, width=width)
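
For orientation, a minimal usage sketch of the helpers above. The color strings and key/value text are invented for illustration; only the function names and signatures come from the module itself.

# Illustrative only: exercises the helpers defined above with made-up colors and values.
from _ansi_utils import (
    print_rule,
    print_table_row,
    rgb_color,
    strip_markdown,
    wrap_text,
)

HEADER_COLOR = "230,126,34"      # assumed 'R,G,B' string, as parse_color expects
SEPARATOR_COLOR = "100,100,100"  # assumed rule color

print(rgb_color(230, 126, 34, "Example Title", bold=True))
print_rule(SEPARATOR_COLOR)
# key_width/value_width are accepted, but the row is laid out from the terminal width
print_table_row("Score", "85/100", HEADER_COLOR, 15, 60)
print_rule(SEPARATOR_COLOR)
print(wrap_text(strip_markdown("**Bold** synopsis with a [link](https://example.com).")))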

View File

@@ -1,22 +0,0 @@
#!/bin/sh
#
# Viu Airing Schedule Info Script Template
# This script formats and displays airing schedule details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Anime Title" "{ANIME_TITLE}"
draw_rule
print_kv "Total Episodes" "{TOTAL_EPISODES}"
print_kv "Upcoming Episodes" "{UPCOMING_EPISODES}"
draw_rule
echo "{C_KEY}Next Episodes:{RESET}"
echo
echo "{SCHEDULE_TABLE}" | fold -s -w "$WIDTH"
draw_rule

View File

@@ -1,75 +0,0 @@
#!/bin/sh
#
# FZF Airing Schedule Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📅 Loading airing schedule..."
fi
fi

View File

@@ -0,0 +1,36 @@
import sys
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = get_terminal_width()
# Print title centered
print("{ANIME_TITLE}".center(term_width))
rows = [
("Total Episodes", "{TOTAL_EPISODES}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Upcoming Episodes", "{UPCOMING_EPISODES}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
print_rule(SEPARATOR_COLOR)
print(wrap_text(strip_markdown("""{SCHEDULE_TABLE}"""), term_width))
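
As the header comments say, this file is a template: the preview worker substitutes each {PLACEHOLDER} with str.replace() and then runs the rendered script with the header and separator colors as sys.argv[1] and sys.argv[2]. A rough sketch of that injection step follows; the helper name, file paths, and sample values are hypothetical, only the .replace() mechanism and the two color arguments are taken from the templates.

import subprocess
import sys

def render_template(template_text: str, values: dict) -> str:
    # Hypothetical helper mirroring the described mechanism: fill each
    # "{NAME}" placeholder with a plain str.replace().
    for name, value in values.items():
        template_text = template_text.replace("{" + name + "}", value)
    return template_text

template = open("/tmp/airing_schedule_info_template.py").read()  # hypothetical path
rendered = render_template(template, {
    "ANIME_TITLE": "Example Anime",   # illustrative values
    "TOTAL_EPISODES": "12",
    "UPCOMING_EPISODES": "3",
    "SCHEDULE_TABLE": "Ep 10  -  airing in 2 days",
})
with open("/tmp/rendered_info.py", "w", encoding="utf-8") as f:
    f.write(rendered)
# The rendered script is then invoked with the two 'R,G,B' color arguments.
subprocess.run([sys.executable, "/tmp/rendered_info.py", "230,126,34", "100,100,100"])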

View File

@@ -1,41 +0,0 @@
#!/bin/sh
#
# Viu Character Info Script Template
# This script formats and displays character details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Character Name" "{CHARACTER_NAME}"
if [ -n "{CHARACTER_NATIVE_NAME}" ] && [ "{CHARACTER_NATIVE_NAME}" != "N/A" ]; then
print_kv "Native Name" "{CHARACTER_NATIVE_NAME}"
fi
draw_rule
if [ -n "{CHARACTER_GENDER}" ] && [ "{CHARACTER_GENDER}" != "Unknown" ]; then
print_kv "Gender" "{CHARACTER_GENDER}"
fi
if [ -n "{CHARACTER_AGE}" ] && [ "{CHARACTER_AGE}" != "Unknown" ]; then
print_kv "Age" "{CHARACTER_AGE}"
fi
if [ -n "{CHARACTER_BLOOD_TYPE}" ] && [ "{CHARACTER_BLOOD_TYPE}" != "N/A" ]; then
print_kv "Blood Type" "{CHARACTER_BLOOD_TYPE}"
fi
if [ -n "{CHARACTER_BIRTHDAY}" ] && [ "{CHARACTER_BIRTHDAY}" != "N/A" ]; then
print_kv "Birthday" "{CHARACTER_BIRTHDAY}"
fi
if [ -n "{CHARACTER_FAVOURITES}" ] && [ "{CHARACTER_FAVOURITES}" != "0" ]; then
print_kv "Favorites" "{CHARACTER_FAVOURITES}"
fi
draw_rule
echo "{CHARACTER_DESCRIPTION}" | fold -s -w "$WIDTH"
draw_rule

View File

@@ -1,130 +0,0 @@
#!/bin/sh
#
# FZF Character Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
# FIXME: Disabled since they cover the text perhaps its aspect ratio related or image format not sure
# if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
# image_file="{IMAGE_CACHE_DIR}{PATH_SEP}$hash.png"
# if [ -f "$image_file" ]; then
# fzf_preview "$image_file"
# echo # Add a newline for spacing
# fi
# fi
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "👤 Loading character details..."
fi
fi

View File

@@ -0,0 +1,47 @@
import sys
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = get_terminal_width()
# Print title centered
print("{CHARACTER_NAME}".center(term_width))
rows = [
("Native Name", "{CHARACTER_NATIVE_NAME}"),
("Gender", "{CHARACTER_GENDER}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Age", "{CHARACTER_AGE}"),
("Blood Type", "{CHARACTER_BLOOD_TYPE}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Birthday", "{CHARACTER_BIRTHDAY}"),
("Favourites", "{CHARACTER_FAVOURITES}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
print_rule(SEPARATOR_COLOR)
print(wrap_text(strip_markdown("""{CHARACTER_DESCRIPTION}"""), term_width))

View File

@@ -1,315 +0,0 @@
#!/bin/bash
#
# FZF Dynamic Preview Script Template
#
# This script handles previews for dynamic search results by parsing the JSON
# search results file and extracting info for the selected item.
# The placeholders in curly braces are dynamically filled by Python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80}
IMAGE_RENDERER="{IMAGE_RENDERER}"
SEARCH_RESULTS_FILE="{SEARCH_RESULTS_FILE}"
IMAGE_CACHE_PATH="{IMAGE_CACHE_PATH}"
INFO_CACHE_PATH="{INFO_CACHE_PATH}"
PATH_SEP="{PATH_SEP}"
# Color codes injected by Python
C_TITLE="{C_TITLE}"
C_KEY="{C_KEY}"
C_VALUE="{C_VALUE}"
C_RULE="{C_RULE}"
RESET="{RESET}"
# Selected item from fzf
SELECTED_ITEM={}
generate_sha256() {
local input="$1"
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule() {
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
clean_html() {
echo "$1" | sed 's/<[^>]*>//g' | sed 's/&lt;/</g' | sed 's/&gt;/>/g' | sed 's/&amp;/\&/g' | sed 's/&quot;/"/g' | sed "s/&#39;/'/g"
}
format_date() {
local date_obj="$1"
if [ "$date_obj" = "null" ] || [ -z "$date_obj" ]; then
echo "N/A"
return
fi
# Extract year, month, day from the date object
if command -v jq >/dev/null 2>&1; then
year=$(echo "$date_obj" | jq -r '.year // "N/A"' 2>/dev/null || echo "N/A")
month=$(echo "$date_obj" | jq -r '.month // ""' 2>/dev/null || echo "")
day=$(echo "$date_obj" | jq -r '.day // ""' 2>/dev/null || echo "")
else
year=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('year', 'N/A'))" 2>/dev/null || echo "N/A")
month=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('month', ''))" 2>/dev/null || echo "")
day=$(echo "$date_obj" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('day', ''))" 2>/dev/null || echo "")
fi
if [ "$year" = "N/A" ] || [ "$year" = "null" ]; then
echo "N/A"
elif [ -n "$month" ] && [ "$month" != "null" ] && [ -n "$day" ] && [ "$day" != "null" ]; then
echo "$day/$month/$year"
elif [ -n "$month" ] && [ "$month" != "null" ]; then
echo "$month/$year"
else
echo "$year"
fi
}
# If no selection or search results file doesn't exist, show placeholder
if [ -z "$SELECTED_ITEM" ] || [ ! -f "$SEARCH_RESULTS_FILE" ]; then
echo "${C_TITLE}Dynamic Search Preview${RESET}"
draw_rule
echo "Type to search for anime..."
echo "Results will appear here as you type."
echo
echo "DEBUG:"
echo "SELECTED_ITEM='$SELECTED_ITEM'"
echo "SEARCH_RESULTS_FILE='$SEARCH_RESULTS_FILE'"
if [ -f "$SEARCH_RESULTS_FILE" ]; then
echo "Search results file exists"
else
echo "Search results file missing"
fi
exit 0
fi
# Parse the search results JSON and find the matching item
if command -v jq >/dev/null 2>&1; then
MEDIA_DATA=$(cat "$SEARCH_RESULTS_FILE" | jq --arg anime_title "$SELECTED_ITEM" '
.data.Page.media[]? |
select((.title.english // .title.romaji // .title.native // "Unknown") == $anime_title )
' )
else
# Fallback to Python for JSON parsing
MEDIA_DATA=$(cat "$SEARCH_RESULTS_FILE" | python3 -c "
import json
import sys
try:
data = json.load(sys.stdin)
selected_item = '''$SELECTED_ITEM'''
if 'data' not in data or 'Page' not in data['data'] or 'media' not in data['data']['Page']:
sys.exit(1)
media_list = data['data']['Page']['media']
for media in media_list:
title = media.get('title', {})
english_title = title.get('english') or title.get('romaji') or title.get('native', 'Unknown')
year = media.get('startDate', {}).get('year', 'Unknown') if media.get('startDate') else 'Unknown'
status = media.get('status', 'Unknown')
genres = ', '.join(media.get('genres', [])[:3]) or 'Unknown'
display_format = f'{english_title} ({year}) [{status}] - {genres}'
# Debug output for matching
print(f"DEBUG: selected_item='{selected_item.strip()}' display_format='{display_format.strip()}'", file=sys.stderr)
if selected_item.strip() == display_format.strip():
json.dump(media, sys.stdout, indent=2)
sys.exit(0)
print(f"DEBUG: No match found for selected_item='{selected_item.strip()}'", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
sys.exit(1)
" 2>/dev/null)
fi
# If we couldn't find the media data, show error
if [ $? -ne 0 ] || [ -z "$MEDIA_DATA" ]; then
echo "${C_TITLE}Preview Error${RESET}"
draw_rule
echo "Could not load preview data for:"
echo "$SELECTED_ITEM"
echo
echo "DEBUG INFO:"
echo "Search results file: $SEARCH_RESULTS_FILE"
if [ -f "$SEARCH_RESULTS_FILE" ]; then
echo "File exists, size: $(wc -c < "$SEARCH_RESULTS_FILE") bytes"
echo "First few lines of search results:"
head -3 "$SEARCH_RESULTS_FILE" 2>/dev/null || echo "Cannot read file"
else
echo "Search results file does not exist"
fi
exit 0
fi
# Extract information from the media data
if command -v jq >/dev/null 2>&1; then
# Use jq for faster extraction
TITLE=$(echo "$MEDIA_DATA" | jq -r '.title.english // .title.romaji // .title.native // "Unknown"' 2>/dev/null || echo "Unknown")
STATUS=$(echo "$MEDIA_DATA" | jq -r '.status // "Unknown"' 2>/dev/null || echo "Unknown")
FORMAT=$(echo "$MEDIA_DATA" | jq -r '.format // "Unknown"' 2>/dev/null || echo "Unknown")
EPISODES=$(echo "$MEDIA_DATA" | jq -r '.episodes // "Unknown"' 2>/dev/null || echo "Unknown")
DURATION=$(echo "$MEDIA_DATA" | jq -r 'if .duration then "\(.duration) min" else "Unknown" end' 2>/dev/null || echo "Unknown")
SCORE=$(echo "$MEDIA_DATA" | jq -r 'if .averageScore then "\(.averageScore)/100" else "N/A" end' 2>/dev/null || echo "N/A")
FAVOURITES=$(echo "$MEDIA_DATA" | jq -r '.favourites // 0' 2>/dev/null | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta' || echo "0")
POPULARITY=$(echo "$MEDIA_DATA" | jq -r '.popularity // 0' 2>/dev/null | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta' || echo "0")
GENRES=$(echo "$MEDIA_DATA" | jq -r '(.genres[:5] // []) | join(", ") | if . == "" then "Unknown" else . end' 2>/dev/null || echo "Unknown")
DESCRIPTION=$(echo "$MEDIA_DATA" | jq -r '.description // "No description available."' 2>/dev/null || echo "No description available.")
# Get start and end dates as JSON objects
START_DATE_OBJ=$(echo "$MEDIA_DATA" | jq -c '.startDate' 2>/dev/null || echo "null")
END_DATE_OBJ=$(echo "$MEDIA_DATA" | jq -c '.endDate' 2>/dev/null || echo "null")
# Get cover image URL
COVER_IMAGE=$(echo "$MEDIA_DATA" | jq -r '.coverImage.large // ""' 2>/dev/null || echo "")
else
# Fallback to Python for extraction
TITLE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); title=data.get('title',{}); print(title.get('english') or title.get('romaji') or title.get('native', 'Unknown'))" 2>/dev/null || echo "Unknown")
STATUS=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('status', 'Unknown'))" 2>/dev/null || echo "Unknown")
FORMAT=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('format', 'Unknown'))" 2>/dev/null || echo "Unknown")
EPISODES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('episodes', 'Unknown'))" 2>/dev/null || echo "Unknown")
DURATION=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); duration=data.get('duration'); print(f'{duration} min' if duration else 'Unknown')" 2>/dev/null || echo "Unknown")
SCORE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); score=data.get('averageScore'); print(f'{score}/100' if score else 'N/A')" 2>/dev/null || echo "N/A")
FAVOURITES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\"{data.get('favourites', 0):,}\")" 2>/dev/null || echo "0")
POPULARITY=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\"{data.get('popularity', 0):,}\")" 2>/dev/null || echo "0")
GENRES=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(', '.join(data.get('genres', [])[:5]))" 2>/dev/null || echo "Unknown")
DESCRIPTION=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); print(data.get('description', 'No description available.'))" 2>/dev/null || echo "No description available.")
# Get start and end dates
START_DATE_OBJ=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); json.dump(data.get('startDate'), sys.stdout)" 2>/dev/null || echo "null")
END_DATE_OBJ=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); json.dump(data.get('endDate'), sys.stdout)" 2>/dev/null || echo "null")
# Get cover image URL
COVER_IMAGE=$(echo "$MEDIA_DATA" | python3 -c "import json, sys; data=json.load(sys.stdin); cover=data.get('coverImage',{}); print(cover.get('large', ''))" 2>/dev/null || echo "")
fi
# Format the dates
START_DATE=$(format_date "$START_DATE_OBJ")
END_DATE=$(format_date "$END_DATE_OBJ")
# Generate cache hash for this item (using selected item like regular preview)
CACHE_HASH=$(generate_sha256 "$SELECTED_ITEM")
# Try to show image if available
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
image_file="{IMAGE_CACHE_PATH}{PATH_SEP}${CACHE_HASH}.png"
# If image not cached and we have a URL, try to download it quickly
if [ ! -f "$image_file" ] && [ -n "$COVER_IMAGE" ]; then
if command -v curl >/dev/null 2>&1; then
# Quick download with timeout
curl -s -m 3 -L "$COVER_IMAGE" -o "$image_file" 2>/dev/null || rm -f "$image_file" 2>/dev/null
fi
fi
if [ -f "$image_file" ]; then
fzf_preview "$image_file"
else
echo "🖼️ Loading image..."
fi
echo
fi
# Display text info if configured
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
draw_rule
print_kv "Title" "$TITLE"
draw_rule
print_kv "Score" "$SCORE"
print_kv "Favourites" "$FAVOURITES"
print_kv "Popularity" "$POPULARITY"
print_kv "Status" "$STATUS"
draw_rule
print_kv "Episodes" "$EPISODES"
print_kv "Duration" "$DURATION"
print_kv "Format" "$FORMAT"
draw_rule
print_kv "Genres" "$GENRES"
print_kv "Start Date" "$START_DATE"
print_kv "End Date" "$END_DATE"
draw_rule
# Clean and display description
CLEAN_DESCRIPTION=$(clean_html "$DESCRIPTION")
echo "$CLEAN_DESCRIPTION" | fold -s -w "$WIDTH"
fi

View File

@@ -0,0 +1,434 @@
#!/usr/bin/env python3
#
# FZF Dynamic Preview Script for Search Results
#
# This script handles previews for dynamic search by reading from the cached
# search results JSON and generating preview content on-the-fly.
# Template variables are injected by Python using .replace()
import json
import os
import shutil
import subprocess
import sys
from hashlib import sha256
from pathlib import Path
# Import the utility functions
from _ansi_utils import (
get_terminal_width,
print_rule,
print_table_row,
strip_markdown,
wrap_text,
)
# --- Template Variables (Injected by Python) ---
SEARCH_RESULTS_FILE = Path("{SEARCH_RESULTS_FILE}")
IMAGE_CACHE_DIR = Path("{IMAGE_CACHE_DIR}")
PREVIEW_MODE = "{PREVIEW_MODE}"
IMAGE_RENDERER = "{IMAGE_RENDERER}"
HEADER_COLOR = "{HEADER_COLOR}"
SEPARATOR_COLOR = "{SEPARATOR_COLOR}"
SCALE_UP = "{SCALE_UP}" == "True"
# --- Arguments ---
# sys.argv[1] is the selected anime title from fzf
SELECTED_TITLE = sys.argv[1] if len(sys.argv) > 1 else ""
def format_number(num):
"""Format number with thousand separators."""
if num is None:
return "N/A"
return f"{num:,}"
def format_date(date_obj):
"""Format date object to string."""
if not date_obj or date_obj == "null":
return "N/A"
year = date_obj.get("year")
month = date_obj.get("month")
day = date_obj.get("day")
if not year:
return "N/A"
if month and day:
return f"{day}/{month}/{year}"
if month:
return f"{month}/{year}"
return str(year)
def get_media_from_results(title):
"""Find media item in search results by title."""
if not SEARCH_RESULTS_FILE.exists():
return None
try:
with open(SEARCH_RESULTS_FILE, "r", encoding="utf-8") as f:
data = json.load(f)
media_list = data.get("data", {}).get("Page", {}).get("media", [])
for media in media_list:
title_obj = media.get("title", {})
eng = title_obj.get("english")
rom = title_obj.get("romaji")
nat = title_obj.get("native")
if title in (eng, rom, nat):
return media
return None
except Exception as e:
print(f"Error reading search results: {e}", file=sys.stderr)
return None
def download_image(url: str, output_path: Path) -> bool:
"""Download image from URL and save to file."""
try:
# Try using urllib (stdlib)
from urllib import request
req = request.Request(url, headers={"User-Agent": "viu/1.0"})
with request.urlopen(req, timeout=5) as response:
data = response.read()
output_path.write_bytes(data)
return True
except Exception:
# Silently fail - preview will just not show image
return False
def which(cmd):
"""Check if command exists."""
return shutil.which(cmd)
def get_terminal_dimensions():
"""Get terminal dimensions from FZF environment."""
fzf_cols = os.environ.get("FZF_PREVIEW_COLUMNS")
fzf_lines = os.environ.get("FZF_PREVIEW_LINES")
if fzf_cols and fzf_lines:
return int(fzf_cols), int(fzf_lines)
try:
rows, cols = (
subprocess.check_output(
["stty", "size"], text=True, stderr=subprocess.DEVNULL
)
.strip()
.split()
)
return int(cols), int(rows)
except Exception:
return 80, 24
def render_kitty(file_path, width, height, scale_up):
"""Render using the Kitty Graphics Protocol (kitten/icat)."""
cmd = []
if which("kitten"):
cmd = ["kitten", "icat"]
elif which("icat"):
cmd = ["icat"]
elif which("kitty"):
cmd = ["kitty", "+kitten", "icat"]
if not cmd:
return False
args = [
"--clear",
"--transfer-mode=memory",
"--unicode-placeholder",
"--stdin=no",
f"--place={width}x{height}@0x0",
]
if scale_up:
args.append("--scale-up")
args.append(file_path)
subprocess.run(cmd + args, stdout=sys.stdout, stderr=sys.stderr)
return True
def render_sixel(file_path, width, height):
"""Render using Sixel."""
if which("chafa"):
subprocess.run(
["chafa", "-f", "sixel", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
if which("img2sixel"):
pixel_width = width * 10
pixel_height = height * 20
subprocess.run(
[
"img2sixel",
f"--width={pixel_width}",
f"--height={pixel_height}",
file_path,
],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_iterm(file_path, width, height):
"""Render using iTerm2 Inline Image Protocol."""
if which("imgcat"):
subprocess.run(
["imgcat", "-W", str(width), "-H", str(height), file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
if which("chafa"):
subprocess.run(
["chafa", "-f", "iterm", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_timg(file_path, width, height):
"""Render using timg."""
if which("timg"):
subprocess.run(
["timg", f"-g{width}x{height}", "--upscale", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_chafa_auto(file_path, width, height):
"""Render using Chafa in auto mode."""
if which("chafa"):
subprocess.run(
["chafa", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def fzf_image_preview(file_path: str):
"""Main dispatch function to choose the best renderer."""
cols, lines = get_terminal_dimensions()
width = cols
height = lines
# Check explicit configuration
if IMAGE_RENDERER == "icat" or IMAGE_RENDERER == "system-kitty":
if render_kitty(file_path, width, height, SCALE_UP):
return
elif IMAGE_RENDERER == "sixel" or IMAGE_RENDERER == "system-sixels":
if render_sixel(file_path, width, height):
return
elif IMAGE_RENDERER == "imgcat":
if render_iterm(file_path, width, height):
return
elif IMAGE_RENDERER == "timg":
if render_timg(file_path, width, height):
return
elif IMAGE_RENDERER == "chafa":
if render_chafa_auto(file_path, width, height):
return
# Auto-detection / Fallback
if os.environ.get("KITTY_WINDOW_ID") or os.environ.get("GHOSTTY_BIN_DIR"):
if render_kitty(file_path, width, height, SCALE_UP):
return
if os.environ.get("TERM_PROGRAM") == "iTerm.app":
if render_iterm(file_path, width, height):
return
# Try standard tools in order of quality/preference
if render_kitty(file_path, width, height, SCALE_UP):
return
if render_sixel(file_path, width, height):
return
if render_timg(file_path, width, height):
return
if render_chafa_auto(file_path, width, height):
return
print("⚠️ No suitable image renderer found (icat, chafa, timg, img2sixel).")
def main():
if not SELECTED_TITLE:
print("No selection")
return
# Get the media data from cached search results
media = get_media_from_results(SELECTED_TITLE)
if not media:
print("Loading preview...")
return
term_width = get_terminal_width()
# Extract media information
title_obj = media.get("title", {})
title = (
title_obj.get("english")
or title_obj.get("romaji")
or title_obj.get("native")
or "Unknown"
)
# Show image if in image or full mode
if PREVIEW_MODE in ("image", "full"):
cover_image = media.get("coverImage", {}).get("large", "")
if cover_image:
# Ensure image cache directory exists
IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)
# Generate hash matching the preview worker pattern
# Use "anime-" prefix and hash of just the title (no KEY prefix for dynamic search)
hash_id = f"anime-{sha256(SELECTED_TITLE.encode('utf-8')).hexdigest()}"
image_file = IMAGE_CACHE_DIR / f"{hash_id}.png"
# Download image if not cached
if not image_file.exists():
download_image(cover_image, image_file)
# Try to render the image
if image_file.exists():
fzf_image_preview(str(image_file))
print() # Spacer
else:
print("🖼️ Loading image...")
print()
# Show text info if in text or full mode
if PREVIEW_MODE in ("text", "full"):
# Separator line
r, g, b = map(int, SEPARATOR_COLOR.split(","))
separator = f"\x1b[38;2;{r};{g};{b}m" + ("─" * term_width) + "\x1b[0m"
print(separator, flush=True)
# Title centered
print(title.center(term_width))
# Extract data
status = media.get("status", "Unknown")
format_type = media.get("format", "Unknown")
episodes = media.get("episodes", "?")
duration = media.get("duration")
duration_str = f"{duration} min" if duration else "Unknown"
score = media.get("averageScore")
score_str = f"{score}/100" if score else "N/A"
favourites = format_number(media.get("favourites", 0))
popularity = format_number(media.get("popularity", 0))
genres = ", ".join(media.get("genres", [])[:5]) or "Unknown"
start_date = format_date(media.get("startDate"))
end_date = format_date(media.get("endDate"))
studios_list = media.get("studios", {}).get("nodes", [])
studios = ", ".join([s.get("name", "") for s in studios_list[:3]]) or "Unknown"
synonyms_list = media.get("synonyms", [])
synonyms = ", ".join(synonyms_list[:3]) or "N/A"
description = media.get("description", "No description available.")
description = strip_markdown(description)
# Print sections matching media_info.py structure
rows = [
("Score", score_str),
("Favorites", favourites),
("Popularity", popularity),
("Status", status),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Episodes", str(episodes)),
("Duration", duration_str),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Genres", genres),
("Format", format_type),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Start Date", start_date),
("End Date", end_date),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Studios", studios),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
rows = [
("Synonyms", synonyms),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 0, 0)
print_rule(SEPARATOR_COLOR)
print(wrap_text(description, term_width))
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
except Exception as e:
print(f"Preview Error: {e}", file=sys.stderr)
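
For reference, get_media_from_results above walks a cached AniList-style response of roughly the following shape. The field values below are invented; only the nesting the script actually reads (data → Page → media → title/coverImage/startDate/…) is taken from the code.

# Invented example payload; only the structure matches what this script reads.
example_results = {
    "data": {
        "Page": {
            "media": [
                {
                    "title": {"english": "Example Anime", "romaji": "Rei no Anime", "native": None},
                    "coverImage": {"large": "https://example.com/cover.png"},
                    "status": "RELEASING",
                    "format": "TV",
                    "episodes": 12,
                    "duration": 24,
                    "averageScore": 85,
                    "favourites": 1234,
                    "popularity": 56789,
                    "genres": ["Action", "Drama"],
                    "startDate": {"year": 2025, "month": 1, "day": 10},
                    "endDate": None,
                    "studios": {"nodes": [{"name": "Example Studio"}]},
                    "synonyms": ["Example Alias"],
                    "description": "An **example** synopsis.",
                }
            ]
        }
    }
}
# get_media_from_results("Example Anime") would return the single media dict above,
# because the selected title matches its English title.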

View File

@@ -1,31 +0,0 @@
#!/bin/sh
#
# Episode Preview Info Script Template
# This script formats and displays episode information in the FZF preview pane.
# Some values are injected by python those with '{name}' syntax using .replace()
draw_rule
echo "{TITLE}" | fold -s -w "$WIDTH"
draw_rule
print_kv "Duration" "{DURATION}"
print_kv "Status" "{STATUS}"
draw_rule
print_kv "Total Episodes" "{EPISODES}"
print_kv "Next Episode" "{NEXT_EPISODE}"
draw_rule
print_kv "Progress" "{USER_PROGRESS}"
print_kv "List Status" "{USER_STATUS}"
draw_rule
print_kv "Start Date" "{START_DATE}"
print_kv "End Date" "{END_DATE}"
draw_rule

View File

@@ -0,0 +1,49 @@
import sys
from _ansi_utils import print_rule, print_table_row, get_terminal_width
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = get_terminal_width()
# Print title centered
print("{TITLE}".center(term_width))
rows = [
("Duration", "{DURATION}"),
("Status", "{STATUS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Total Episodes", "{EPISODES}"),
("Next Episode", "{NEXT_EPISODE}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Progress", "{USER_PROGRESS}"),
("List Status", "{USER_STATUS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Start Date", "{START_DATE}"),
("End Date", "{END_DATE}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
print_rule(SEPARATOR_COLOR)

View File

@@ -1,54 +0,0 @@
#!/bin/sh
#
# Viu Preview Info Script Template
# This script formats and displays the textual information in the FZF preview pane.
# Some values are injected by python those with '{name}' syntax using .replace()
draw_rule
print_kv "Title" "{TITLE}"
draw_rule
# Emojis take up double the space
score_multiplier=1
if ! [ "{SCORE}" = "N/A" ]; then
score_multiplier=2
fi
print_kv "Score" "{SCORE}" $score_multiplier
print_kv "Favourites" "{FAVOURITES}"
print_kv "Popularity" "{POPULARITY}"
print_kv "Status" "{STATUS}"
draw_rule
print_kv "Episodes" "{EPISODES}"
print_kv "Next Episode" "{NEXT_EPISODE}"
print_kv "Duration" "{DURATION}"
draw_rule
print_kv "Genres" "{GENRES}"
print_kv "Format" "{FORMAT}"
draw_rule
print_kv "List Status" "{USER_STATUS}"
print_kv "Progress" "{USER_PROGRESS}"
draw_rule
print_kv "Start Date" "{START_DATE}"
print_kv "End Date" "{END_DATE}"
draw_rule
print_kv "Studios" "{STUDIOS}"
print_kv "Synonymns" "{SYNONYMNS}"
print_kv "Tags" "{TAGS}"
draw_rule
# Synopsis
echo "{SYNOPSIS}" | fold -s -w "$WIDTH"

View File

@@ -0,0 +1,93 @@
import sys
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = get_terminal_width()
# Print title centered
print("{TITLE}".center(term_width))
# Define table data
rows = [
("Score", "{SCORE}"),
("Favorites", "{FAVOURITES}"),
("Popularity", "{POPULARITY}"),
("Status", "{STATUS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Episodes", "{EPISODES}"),
("Duration", "{DURATION}"),
("Next Episode", "{NEXT_EPISODE}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Genres", "{GENRES}"),
("Format", "{FORMAT}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("List Status", "{USER_STATUS}"),
("Progress", "{USER_PROGRESS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Start Date", "{START_DATE}"),
("End Date", "{END_DATE}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Studios", "{STUDIOS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Synonyms", "{SYNONYMNS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
rows = [
("Tags", "{TAGS}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
print_rule(SEPARATOR_COLOR)
print(wrap_text(strip_markdown("""{SYNOPSIS}"""), term_width))

View File

@@ -0,0 +1,288 @@
#!/usr/bin/env python3
#
# FZF Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME},
# are dynamically filled by Python using .replace() at runtime.
import os
import shutil
import subprocess
import sys
from hashlib import sha256
from pathlib import Path
# --- Template Variables (Injected by Python) ---
PREVIEW_MODE = "{PREVIEW_MODE}"
IMAGE_CACHE_DIR = Path("{IMAGE_CACHE_DIR}")
INFO_CACHE_DIR = Path("{INFO_CACHE_DIR}")
IMAGE_RENDERER = "{IMAGE_RENDERER}"
HEADER_COLOR = "{HEADER_COLOR}"
SEPARATOR_COLOR = "{SEPARATOR_COLOR}"
PREFIX = "{PREFIX}"
SCALE_UP = "{SCALE_UP}" == "True"
# --- Arguments ---
# sys.argv[1] is usually the raw line from FZF (the anime title/key)
TITLE = sys.argv[1] if len(sys.argv) > 1 else ""
KEY = """{KEY}"""
KEY = KEY + "-" if KEY else KEY
# Generate the hash to find the cached files
hash_id = f"{PREFIX}-{sha256((KEY + TITLE).encode('utf-8')).hexdigest()}"
def get_terminal_dimensions():
"""
Determine the available dimensions (cols x lines) for the preview window.
Prioritizes FZF environment variables.
"""
fzf_cols = os.environ.get("FZF_PREVIEW_COLUMNS")
fzf_lines = os.environ.get("FZF_PREVIEW_LINES")
if fzf_cols and fzf_lines:
return int(fzf_cols), int(fzf_lines)
# Fallback to stty if FZF vars aren't set (unlikely in preview)
try:
rows, cols = (
subprocess.check_output(
["stty", "size"], text=True, stderr=subprocess.DEVNULL
)
.strip()
.split()
)
return int(cols), int(rows)
except Exception:
return 80, 24
def which(cmd):
"""Alias for shutil.which"""
return shutil.which(cmd)
def render_kitty(file_path, width, height, scale_up):
"""Render using the Kitty Graphics Protocol (kitten/icat)."""
# 1. Try 'kitten icat' (Modern)
# 2. Try 'icat' (Legacy/Alias)
# 3. Try 'kitty +kitten icat' (Fallback)
cmd = []
if which("kitten"):
cmd = ["kitten", "icat"]
elif which("icat"):
cmd = ["icat"]
elif which("kitty"):
cmd = ["kitty", "+kitten", "icat"]
if not cmd:
return False
# Build Arguments
args = [
"--clear",
"--transfer-mode=memory",
"--unicode-placeholder",
"--stdin=no",
f"--place={width}x{height}@0x0",
]
if scale_up:
args.append("--scale-up")
args.append(file_path)
subprocess.run(cmd + args, stdout=sys.stdout, stderr=sys.stderr)
return True
def render_sixel(file_path, width, height):
"""
Render using Sixel.
Prioritizes 'chafa' for Sixel as it handles text-cell sizing better than img2sixel.
"""
# Option A: Chafa (Best for Sixel sizing)
if which("chafa"):
# Chafa automatically detects Sixel support if terminal reports it,
# but we force it here if specifically requested via logic flow.
subprocess.run(
["chafa", "-f", "sixel", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
# Option B: img2sixel (Libsixel)
# Note: img2sixel uses pixels, not cells. We estimate 1 cell ~= 10px width, 20px height
if which("img2sixel"):
pixel_width = width * 10
pixel_height = height * 20
subprocess.run(
[
"img2sixel",
f"--width={pixel_width}",
f"--height={pixel_height}",
file_path,
],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_iterm(file_path, width, height):
"""Render using iTerm2 Inline Image Protocol."""
if which("imgcat"):
subprocess.run(
["imgcat", "-W", str(width), "-H", str(height), file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
# Chafa also supports iTerm
if which("chafa"):
subprocess.run(
["chafa", "-f", "iterm", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_timg(file_path, width, height):
"""Render using timg (supports half-blocks, quarter-blocks, sixel, kitty, etc)."""
if which("timg"):
subprocess.run(
["timg", f"-g{width}x{height}", "--upscale", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def render_chafa_auto(file_path, width, height):
"""
Render using Chafa in auto mode.
It supports Sixel, Kitty, iTerm, and various unicode block modes.
"""
if which("chafa"):
subprocess.run(
["chafa", "-s", f"{width}x{height}", file_path],
stdout=sys.stdout,
stderr=sys.stderr,
)
return True
return False
def fzf_image_preview(file_path: str):
"""
Main dispatch function to choose the best renderer.
"""
cols, lines = get_terminal_dimensions()
# Heuristic: Reserve 1 line for prompt/status if needed, though FZF handles this.
# Some renderers behave better with a tiny bit of padding.
width = cols
height = lines
# --- 1. Check Explicit Configuration ---
if IMAGE_RENDERER == "icat" or IMAGE_RENDERER == "system-kitty":
if render_kitty(file_path, width, height, SCALE_UP):
return
elif IMAGE_RENDERER == "sixel" or IMAGE_RENDERER == "system-sixels":
if render_sixel(file_path, width, height):
return
elif IMAGE_RENDERER == "imgcat":
if render_iterm(file_path, width, height):
return
elif IMAGE_RENDERER == "timg":
if render_timg(file_path, width, height):
return
elif IMAGE_RENDERER == "chafa":
if render_chafa_auto(file_path, width, height):
return
# --- 2. Auto-Detection / Fallback Strategy ---
# If explicit failed or set to 'auto'/'system-default', try detecting environment
# Ghostty / Kitty Environment
if os.environ.get("KITTY_WINDOW_ID") or os.environ.get("GHOSTTY_BIN_DIR"):
if render_kitty(file_path, width, height, SCALE_UP):
return
# iTerm Environment
if os.environ.get("TERM_PROGRAM") == "iTerm.app":
if render_iterm(file_path, width, height):
return
# Try standard tools in order of quality/preference
if render_kitty(file_path, width, height, SCALE_UP):
return # Try kitty just in case
if render_sixel(file_path, width, height):
return
if render_timg(file_path, width, height):
return
if render_chafa_auto(file_path, width, height):
return
print("⚠️ No suitable image renderer found (icat, chafa, timg, img2sixel).")
def fzf_text_info_render():
"""Renders the text-based info via the cached python script."""
# Get terminal dimensions from FZF environment or fallback
cols, lines = get_terminal_dimensions()
# Print simple separator line with proper width
r, g, b = map(int, SEPARATOR_COLOR.split(","))
separator = f"\x1b[38;2;{r};{g};{b}m" + ("─" * cols) + "\x1b[0m"
print(separator, flush=True)
if PREVIEW_MODE == "text" or PREVIEW_MODE == "full":
preview_info_path = INFO_CACHE_DIR / f"{hash_id}.py"
if preview_info_path.exists():
subprocess.run(
[sys.executable, str(preview_info_path), HEADER_COLOR, SEPARATOR_COLOR]
)
else:
# Print dim text
print("\x1b[2m📝 Loading details...\x1b[0m")
def main():
# 1. Image Preview
if (PREVIEW_MODE == "image" or PREVIEW_MODE == "full") and (
PREFIX not in ("character", "review", "airing-schedule")
):
preview_image_path = IMAGE_CACHE_DIR / f"{hash_id}.png"
if preview_image_path.exists():
fzf_image_preview(str(preview_image_path))
print() # Spacer
else:
print("🖼️ Loading image...")
# 2. Text Info Preview
fzf_text_info_render()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
except Exception as e:
print(f"Preview Error: {e}")
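
This script only reads the caches; the worker that writes them has to derive the same hash_id from the injected PREFIX/KEY and the fzf selection (TITLE). A small illustration with made-up values for those three inputs and stand-in cache directories:

from hashlib import sha256
from pathlib import Path

# Made-up stand-ins: PREFIX and KEY are normally injected into the template,
# TITLE arrives as sys.argv[1] from fzf.
PREFIX = "anime"
KEY = "trending"
TITLE = "Example Anime"

key = KEY + "-" if KEY else KEY
hash_id = f"{PREFIX}-{sha256((key + TITLE).encode('utf-8')).hexdigest()}"
print(hash_id)                                      # prints 'anime-' followed by 64 hex chars
print(Path("/tmp/image-cache") / f"{hash_id}.png")  # stand-in for IMAGE_CACHE_DIR
print(Path("/tmp/info-cache") / f"{hash_id}.py")    # stand-in for INFO_CACHE_DIR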

View File

@@ -1,147 +0,0 @@
#!/bin/sh
#
# FZF Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
fzf_preview() {
file=$1
dim=${FZF_PREVIEW_COLUMNS}x${FZF_PREVIEW_LINES}
if [ "$dim" = x ]; then
dim=$(stty size </dev/tty | awk "{print \$2 \"x\" \$1}")
fi
if ! [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$KITTY_WINDOW_ID" ] && [ "$((FZF_PREVIEW_TOP + FZF_PREVIEW_LINES))" -eq "$(stty size </dev/tty | awk "{print \$1}")" ]; then
dim=${FZF_PREVIEW_COLUMNS}x$((FZF_PREVIEW_LINES - 1))
fi
if [ "$IMAGE_RENDERER" = "icat" ] && [ -z "$GHOSTTY_BIN_DIR" ]; then
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
kitty icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
fi
elif [ -n "$GHOSTTY_BIN_DIR" ]; then
dim=$((FZF_PREVIEW_COLUMNS - 1))x${FZF_PREVIEW_LINES}
if command -v kitten >/dev/null 2>&1; then
kitten icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
elif command -v icat >/dev/null 2>&1; then
icat --clear --transfer-mode=memory --unicode-placeholder{SCALE_UP} --stdin=no --place="$dim@0x0" "$file" | sed "\$d" | sed "$(printf "\$s/\$/\033[m/")"
else
chafa -s "$dim" "$file"
fi
elif command -v chafa >/dev/null 2>&1; then
case "$PLATFORM" in
android) chafa -s "$dim" "$file" ;;
windows) chafa -f sixel -s "$dim" "$file" ;;
*) chafa -s "$dim" "$file" ;;
esac
echo
elif command -v imgcat >/dev/null; then
imgcat -W "${dim%%x*}" -H "${dim##*x}" "$file"
else
echo please install a terminal image viewer
echo either icat for kitty terminal and wezterm or imgcat or chafa
fi
}
# --- Helper function for printing a key-value pair, aligning the value to the right ---
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo "$value"| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
# --- Draw a rule across the screen ---
# TODO: figure out why this method does not work in fzf
draw_rule() {
local rule
# Generate the line of '─' characters, removing the trailing newline `tr` adds.
rule=$(printf '%*s' "$WIDTH" | tr ' ' '─' | tr -d '\n')
# Print the rule with colors and a single, clean newline.
printf "{C_RULE}%s{RESET}\\n" "$rule"
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
# Generate the same cache key that the Python worker uses
# {PREFIX} is used only on episode previews to make sure they are unique
title={}
hash=$(generate_sha256 "{PREFIX}$title")
#
# --- Display image if configured and the cached file exists ---
#
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "image" ]; then
image_file="{IMAGE_CACHE_PATH}{PATH_SEP}$hash.png"
if [ -f "$image_file" ]; then
fzf_preview "$image_file"
else
echo "🖼️ Loading image..."
fi
echo # Add a newline for spacing
fi
# Display text info if configured and the cached file exists
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_PATH}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📝 Loading details..."
fi
fi

View File

@@ -1,19 +0,0 @@
#!/bin/sh
#
# Viu Review Info Script Template
# This script formats and displays review details in the FZF preview pane.
# Python injects the actual data values into the placeholders.
draw_rule
print_kv "Review By" "{REVIEWER_NAME}"
draw_rule
print_kv "Summary" "{REVIEW_SUMMARY}"
draw_rule
echo "{REVIEW_BODY}" | fold -s -w "$WIDTH"
draw_rule

View File

@@ -1,75 +0,0 @@
#!/bin/sh
#
# FZF Preview Script Template
#
# This script is a template. The placeholders in curly braces, like {NAME}
# are dynamically filled by python using .replace()
WIDTH=${FZF_PREVIEW_COLUMNS:-80} # Set a fallback width of 80
IMAGE_RENDERER="{IMAGE_RENDERER}"
generate_sha256() {
local input
# Check if input is passed as an argument or piped
if [ -n "$1" ]; then
input="$1"
else
input=$(cat)
fi
if command -v sha256sum &>/dev/null; then
echo -n "$input" | sha256sum | awk '{print $1}'
elif command -v shasum &>/dev/null; then
echo -n "$input" | shasum -a 256 | awk '{print $1}'
elif command -v sha256 &>/dev/null; then
echo -n "$input" | sha256 | awk '{print $1}'
elif command -v openssl &>/dev/null; then
echo -n "$input" | openssl dgst -sha256 | awk '{print $2}'
else
echo -n "$input" | base64 | tr '/+' '_-' | tr -d '\n'
fi
}
print_kv() {
local key="$1"
local value="$2"
local key_len=${#key}
local value_len=${#value}
local multiplier="${3:-1}"
# Correctly calculate padding by accounting for the key, the ": ", and the value.
local padding_len=$((WIDTH - key_len - 2 - value_len * multiplier))
# If the text is too long to fit, just add a single space for separation.
if [ "$padding_len" -lt 1 ]; then
padding_len=1
value=$(echo $value| fold -s -w "$((WIDTH - key_len - 3))")
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
else
printf "{C_KEY}%s:{RESET}%*s%s\\n" "$key" "$padding_len" "" " $value"
fi
}
draw_rule(){
ll=2
while [ $ll -le $FZF_PREVIEW_COLUMNS ];do
echo -n -e "{C_RULE}─{RESET}"
((ll++))
done
echo
}
title={}
hash=$(generate_sha256 "$title")
if [ "{PREVIEW_MODE}" = "full" ] || [ "{PREVIEW_MODE}" = "text" ]; then
info_file="{INFO_CACHE_DIR}{PATH_SEP}$hash"
if [ -f "$info_file" ]; then
source "$info_file"
else
echo "📝 Loading details..."
fi
fi

View File

@@ -0,0 +1,28 @@
import sys
from _ansi_utils import (
print_rule,
print_table_row,
strip_markdown,
wrap_text,
get_terminal_width,
)
HEADER_COLOR = sys.argv[1]
SEPARATOR_COLOR = sys.argv[2]
# Get terminal dimensions
term_width = get_terminal_width()
# Print title centered
print("{REVIEWER_NAME}".center(term_width))
rows = [
("Summary", "{REVIEW_SUMMARY}"),
]
print_rule(SEPARATOR_COLOR)
for key, value in rows:
print_table_row(key, value, HEADER_COLOR, 15, term_width - 20)
print_rule(SEPARATOR_COLOR)
print(wrap_text(strip_markdown("""{REVIEW_BODY}"""), term_width))

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env python3
#
# FZF Dynamic Search Script Template
#
# This script is a template for dynamic search functionality in fzf.
# The placeholders in curly braces, like {GRAPHQL_ENDPOINT}, are dynamically
# filled by Python using .replace() at runtime.
import json
import sys
from pathlib import Path
from urllib import request
from urllib.error import URLError
# --- Template Variables (Injected by Python) ---
GRAPHQL_ENDPOINT = "{GRAPHQL_ENDPOINT}"
SEARCH_RESULTS_FILE = Path("{SEARCH_RESULTS_FILE}")
AUTH_HEADER = "{AUTH_HEADER}"
# The GraphQL query is injected as a properly escaped JSON string
GRAPHQL_QUERY = "{GRAPHQL_QUERY}"
# --- Get Query from fzf ---
# fzf passes the current query as the first argument when using --bind change:reload
QUERY = sys.argv[1] if len(sys.argv) > 1 else ""
# If query is empty, exit with empty results
if not QUERY.strip():
print("")
sys.exit(0)
def make_graphql_request(
endpoint: str, query: str, variables: dict, auth_token: str = ""
) -> dict | None:
"""
Make a GraphQL request to the specified endpoint.
Args:
endpoint: GraphQL API endpoint URL
query: GraphQL query string
variables: Query variables as a dictionary
auth_token: Optional authorization token (Bearer token)
Returns:
Response JSON as a dictionary, or None if request fails
"""
payload = {"query": query, "variables": variables}
headers = {"Content-Type": "application/json", "User-Agent": "viu/1.0"}
if auth_token:
headers["Authorization"] = auth_token
try:
req = request.Request(
endpoint,
data=json.dumps(payload).encode("utf-8"),
headers=headers,
method="POST",
)
with request.urlopen(req, timeout=10) as response:
return json.loads(response.read().decode("utf-8"))
    except Exception as e:  # covers URLError, json.JSONDecodeError, timeouts, etc.
print(f"❌ Request failed: {e}", file=sys.stderr)
return None
def extract_title(media_item: dict) -> str:
"""
Extract the best available title from a media item.
Args:
media_item: Media object from GraphQL response
Returns:
Title string (english > romaji > native > "Unknown")
"""
title_obj = media_item.get("title", {})
return (
title_obj.get("english")
or title_obj.get("romaji")
or title_obj.get("native")
or "Unknown"
)
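# Illustrative precedence (sample data): a media item whose title is
#   {"english": None, "romaji": "Sousou no Frieren", "native": "葬送のフリーレン"}
# is rendered as "Sousou no Frieren".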
def main():
# Ensure parent directory exists
SEARCH_RESULTS_FILE.parent.mkdir(parents=True, exist_ok=True)
# Create GraphQL variables
variables = {
"query": QUERY,
"type": "ANIME",
"per_page": 50,
"genre_not_in": ["Hentai"],
}
# Make the GraphQL request
response = make_graphql_request(
GRAPHQL_ENDPOINT, GRAPHQL_QUERY, variables, AUTH_HEADER
)
if response is None:
print("❌ Search failed")
sys.exit(1)
# Save the raw response for later processing by dynamic_search.py
try:
with open(SEARCH_RESULTS_FILE, "w", encoding="utf-8") as f:
json.dump(response, f, ensure_ascii=False, indent=2)
except IOError as e:
print(f"❌ Failed to save results: {e}", file=sys.stderr)
sys.exit(1)
# Parse and display results
if "errors" in response:
print(f"❌ Search error: {response['errors']}")
sys.exit(1)
# Navigate the response structure
data = response.get("data", {})
page = data.get("Page", {})
media_list = page.get("media", [])
if not media_list:
print("❌ No results found")
sys.exit(0)
# Output titles for fzf (one per line)
for media in media_list:
title = extract_title(media)
print(title)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
print(f"❌ Unexpected error: {e}", file=sys.stderr)
sys.exit(1)

View File

@@ -1,118 +0,0 @@
#!/bin/bash
#
# FZF Dynamic Search Script Template
#
# This script is a template for dynamic search functionality in fzf.
# The placeholders in curly braces, like {QUERY} are dynamically filled by Python using .replace()
# Configuration variables (injected by Python)
GRAPHQL_ENDPOINT="{GRAPHQL_ENDPOINT}"
CACHE_DIR="{CACHE_DIR}"
SEARCH_RESULTS_FILE="{SEARCH_RESULTS_FILE}"
AUTH_HEADER="{AUTH_HEADER}"
# Get the current query from fzf
QUERY="{{q}}"
# If query is empty, exit with empty results
if [ -z "$QUERY" ]; then
echo ""
exit 0
fi
# Create GraphQL variables
VARIABLES=$(cat <<EOF
{
"query": "$QUERY",
"type": "ANIME",
"per_page": 50,
"genre_not_in": ["Hentai"]
}
EOF
)
# The GraphQL query is injected here as a properly escaped string
GRAPHQL_QUERY='{GRAPHQL_QUERY}'
# Create the GraphQL request payload
PAYLOAD=$(cat <<EOF
{
"query": $GRAPHQL_QUERY,
"variables": $VARIABLES
}
EOF
)
# Make the GraphQL request and save raw results
if [ -n "$AUTH_HEADER" ]; then
RESPONSE=$(curl -s -X POST \
-H "Content-Type: application/json" \
-H "Authorization: $AUTH_HEADER" \
-d "$PAYLOAD" \
"$GRAPHQL_ENDPOINT")
else
RESPONSE=$(curl -s -X POST \
-H "Content-Type: application/json" \
-d "$PAYLOAD" \
"$GRAPHQL_ENDPOINT")
fi
# Check if the request was successful
if [ $? -ne 0 ] || [ -z "$RESPONSE" ]; then
echo "❌ Search failed"
exit 1
fi
# Save the raw response for later processing
echo "$RESPONSE" > "$SEARCH_RESULTS_FILE"
# Parse and display results
if command -v jq >/dev/null 2>&1; then
# Use jq for faster and more reliable JSON parsing
echo "$RESPONSE" | jq -r '
if .errors then
"❌ Search error: " + (.errors | tostring)
elif (.data.Page.media // []) | length == 0 then
"❌ No results found"
else
.data.Page.media[] | (.title.english // .title.romaji // .title.native // "Unknown")
end
' 2>/dev/null || echo "❌ Parse error"
else
# Fallback to Python for JSON parsing
echo "$RESPONSE" | python3 -c "
import json
import sys
try:
data = json.load(sys.stdin)
if 'errors' in data:
print('❌ Search error: ' + str(data['errors']))
sys.exit(1)
if 'data' not in data or 'Page' not in data['data'] or 'media' not in data['data']['Page']:
print('❌ No results found')
sys.exit(0)
media_list = data['data']['Page']['media']
if not media_list:
print('❌ No results found')
sys.exit(0)
for media in media_list:
title = media.get('title', {})
english_title = title.get('english') or title.get('romaji') or title.get('native', 'Unknown')
year = media.get('startDate', {}).get('year', 'Unknown') if media.get('startDate') else 'Unknown'
status = media.get('status', 'Unknown')
genres = ', '.join(media.get('genres', [])[:3]) or 'Unknown'
# Format: Title (Year) [Status] - Genres
print(f'{english_title} ({year}) [{status}] - {genres}')
except Exception as e:
print(f'❌ Parse error: {str(e)}')
sys.exit(1)
"
fi

View File

@@ -1,5 +1,6 @@
import json
import logging
import sys
from .....core.constants import APP_CACHE_DIR, SCRIPTS_DIR
from .....libs.media_api.params import MediaSearchParams
@@ -11,9 +12,7 @@ logger = logging.getLogger(__name__)
SEARCH_CACHE_DIR = APP_CACHE_DIR / "search"
SEARCH_RESULTS_FILE = SEARCH_CACHE_DIR / "current_search_results.json"
FZF_SCRIPTS_DIR = SCRIPTS_DIR / "fzf"
SEARCH_TEMPLATE_SCRIPT = (FZF_SCRIPTS_DIR / "search.template.sh").read_text(
encoding="utf-8"
)
SEARCH_TEMPLATE_SCRIPT = (FZF_SCRIPTS_DIR / "search.py").read_text(encoding="utf-8")
@session.menu
@@ -29,8 +28,8 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
from .....libs.media_api.anilist import gql
search_query = gql.SEARCH_MEDIA.read_text(encoding="utf-8")
# Properly escape the GraphQL query for JSON
search_query_escaped = json.dumps(search_query)
    # Escape the GraphQL query with json.dumps, then strip the double quotes since the Python template already wraps {GRAPHQL_QUERY} in quotes
search_query_json = json.dumps(search_query).replace('"', "")
# Prepare the search script
auth_header = ""
@@ -42,8 +41,7 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
replacements = {
"GRAPHQL_ENDPOINT": "https://graphql.anilist.co",
"GRAPHQL_QUERY": search_query_escaped,
"CACHE_DIR": str(SEARCH_CACHE_DIR),
"GRAPHQL_QUERY": search_query_json,
"SEARCH_RESULTS_FILE": str(SEARCH_RESULTS_FILE),
"AUTH_HEADER": auth_header,
}
@@ -51,6 +49,14 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
for key, value in replacements.items():
search_command = search_command.replace(f"{{{key}}}", str(value))
# Write the filled template to a cache file
search_script_file = SEARCH_CACHE_DIR / "search-script.py"
search_script_file.write_text(search_command, encoding="utf-8")
    # Run the script through the Python interpreter so it does not need an executable bit
# fzf will pass the query as {q} which becomes the first argument
search_command_final = f"{sys.executable} {search_script_file} {{q}}"
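    # Illustrative result (interpreter and cache paths vary per machine):
    #   /usr/bin/python3 <cache>/search/search-script.py {q}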
try:
# Prepare preview functionality
preview_command = None
@@ -62,13 +68,13 @@ def dynamic_search(ctx: Context, state: State) -> State | InternalDirective:
choice = ctx.selector.search(
prompt="Search Anime",
search_command=search_command,
search_command=search_command_final,
preview=preview_command,
)
else:
choice = ctx.selector.search(
prompt="Search Anime",
search_command=search_command,
search_command=search_command_final,
)
except NotImplementedError:
feedback.error("Dynamic search is not supported by your current selector")

View File

@@ -1,13 +1,15 @@
import logging
import os
import re
from hashlib import sha256
import sys
from typing import Dict, List, Optional
import httpx
from viu_media.core.utils import formatter
from ...core.config import AppConfig
from ...core.constants import APP_CACHE_DIR, PLATFORM, SCRIPTS_DIR
from ...core.constants import APP_CACHE_DIR, SCRIPTS_DIR
from ...core.utils.file import AtomicWriter
from ...libs.media_api.types import (
AiringScheduleResult,
@@ -15,7 +17,6 @@ from ...libs.media_api.types import (
MediaItem,
MediaReview,
)
from . import ansi
from .preview_workers import PreviewWorkerManager
@@ -117,29 +118,15 @@ def _get_episode_image(episode: str, media_item: MediaItem) -> str:
logger = logging.getLogger(__name__)
os.environ["SHELL"] = "bash"
# os.environ["SHELL"] = sys.executable
PREVIEWS_CACHE_DIR = APP_CACHE_DIR / "previews"
IMAGES_CACHE_DIR = PREVIEWS_CACHE_DIR / "images"
INFO_CACHE_DIR = PREVIEWS_CACHE_DIR / "info"
REVIEWS_CACHE_DIR = PREVIEWS_CACHE_DIR / "reviews"
CHARACTERS_CACHE_DIR = PREVIEWS_CACHE_DIR / "characters"
AIRING_SCHEDULE_CACHE_DIR = PREVIEWS_CACHE_DIR / "airing_schedule"
FZF_SCRIPTS_DIR = SCRIPTS_DIR / "fzf"
TEMPLATE_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "preview.template.sh").read_text(
encoding="utf-8"
)
TEMPLATE_REVIEW_PREVIEW_SCRIPT = (
FZF_SCRIPTS_DIR / "review-preview.template.sh"
).read_text(encoding="utf-8")
TEMPLATE_CHARACTER_PREVIEW_SCRIPT = (
FZF_SCRIPTS_DIR / "character-preview.template.sh"
).read_text(encoding="utf-8")
TEMPLATE_AIRING_SCHEDULE_PREVIEW_SCRIPT = (
FZF_SCRIPTS_DIR / "airing-schedule-preview.template.sh"
).read_text(encoding="utf-8")
DYNAMIC_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "dynamic-preview.template.sh").read_text(
TEMPLATE_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "preview.py").read_text(encoding="utf-8")
DYNAMIC_PREVIEW_SCRIPT = (FZF_SCRIPTS_DIR / "dynamic_preview.py").read_text(
encoding="utf-8"
)
@@ -149,6 +136,23 @@ EPISODE_PATTERN = re.compile(r"^Episode\s+(\d+)\s-\s.*")
_preview_manager: Optional[PreviewWorkerManager] = None
def _ensure_ansi_utils_in_cache():
"""Copy _ansi_utils.py to the info cache directory so cached scripts can import it."""
source = FZF_SCRIPTS_DIR / "_ansi_utils.py"
dest = INFO_CACHE_DIR / "_ansi_utils.py"
if source.exists() and (
not dest.exists() or source.stat().st_mtime > dest.stat().st_mtime
):
try:
import shutil
shutil.copy2(source, dest)
logger.debug(f"Copied _ansi_utils.py to {INFO_CACHE_DIR}")
except Exception as e:
logger.warning(f"Failed to copy _ansi_utils.py to cache: {e}")
def create_preview_context():
"""
Create a context manager for preview operations.
@@ -284,6 +288,7 @@ def get_anime_preview(
# Ensure cache directories exist on startup
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
_ensure_ansi_utils_in_cache()
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
@@ -300,30 +305,28 @@ def get_anime_preview(
logger.error(f"Failed to start background caching: {e}")
# Continue with script generation even if caching fails
# Prepare values to inject into the template
path_sep = "\\" if PLATFORM == "win32" else "/"
# Format the template with the dynamic values
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_PATH": str(IMAGES_CACHE_DIR),
"INFO_CACHE_PATH": str(INFO_CACHE_DIR),
"PATH_SEP": path_sep,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"C_TITLE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_KEY": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_VALUE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_RULE": ansi.get_true_fg(SEPARATOR_COLOR, bold=True),
"RESET": ansi.RESET,
"PREFIX": "",
"SCALE_UP": " --scale-up" if config.general.preview_scale_up else "",
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"PREFIX": "search-result",
"KEY": "",
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
preview_file = PREVIEWS_CACHE_DIR / "search-result-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
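    # fzf substitutes {} with the selected line, so the preview runs roughly as
    # (illustrative): python3 <previews cache>/search-result-preview-script.py "<selection>"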
return preview_script_final
def get_episode_preview(
@@ -360,30 +363,169 @@ def get_episode_preview(
logger.error(f"Failed to start episode background caching: {e}")
# Continue with script generation even if caching fails
# Prepare values to inject into the template
path_sep = "\\" if PLATFORM == "win32" else "/"
# Format the template with the dynamic values
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_PATH": str(IMAGES_CACHE_DIR),
"INFO_CACHE_PATH": str(INFO_CACHE_DIR),
"PATH_SEP": path_sep,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"C_TITLE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_KEY": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_VALUE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_RULE": ansi.get_true_fg(SEPARATOR_COLOR, bold=True),
"RESET": ansi.RESET,
"PREFIX": f"{media_item.title.english}_Episode_",
"SCALE_UP": " --scale-up" if config.general.preview_scale_up else "",
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"PREFIX": "episode",
"KEY": f"{media_item.title.english.replace(formatter.DOUBLE_QUOTE, formatter.SINGLE_QUOTE)}",
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
preview_file = PREVIEWS_CACHE_DIR / "episode-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
return preview_script_final
def get_character_preview(choice_map: Dict[str, Character], config: AppConfig) -> str:
"""
Generate the generic loader script for character previews and start background caching.
"""
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
    # Start managed background caching for characters
try:
preview_manager = _get_preview_manager()
worker = preview_manager.get_character_worker()
worker.cache_character_previews(choice_map, config)
logger.debug("Started background caching for character previews")
except Exception as e:
logger.error(f"Failed to start episode background caching: {e}")
# Use the generic loader script
preview_script = TEMPLATE_PREVIEW_SCRIPT
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"PREFIX": "character",
"KEY": "",
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
preview_file = PREVIEWS_CACHE_DIR / "character-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
return preview_script_final
def get_review_preview(choice_map: Dict[str, MediaReview], config: AppConfig) -> str:
"""
Generate the generic loader script for review previews and start background caching.
"""
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
    # Start managed background caching for reviews
try:
preview_manager = _get_preview_manager()
worker = preview_manager.get_review_worker()
worker.cache_review_previews(choice_map, config)
logger.debug("Started background caching for review previews")
except Exception as e:
logger.error(f"Failed to start episode background caching: {e}")
# Use the generic loader script
preview_script = TEMPLATE_PREVIEW_SCRIPT
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"PREFIX": "review",
"KEY": "",
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
preview_file = PREVIEWS_CACHE_DIR / "review-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
preview_script_final = f"{sys.executable} {preview_file} {{}}"
return preview_script_final
def get_airing_schedule_preview(
schedule_result: AiringScheduleResult, config: AppConfig, anime_title: str = "Anime"
) -> str:
"""
Generate the generic loader script for airing schedule previews and start background caching.
"""
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
    # Start managed background caching for the airing schedule
try:
preview_manager = _get_preview_manager()
worker = preview_manager.get_airing_schedule_worker()
worker.cache_airing_schedule_preview(anime_title, schedule_result, config)
logger.debug("Started background caching for airing schedule previews")
except Exception as e:
logger.error(f"Failed to start episode background caching: {e}")
# Use the generic loader script
preview_script = TEMPLATE_PREVIEW_SCRIPT
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_RENDERER": config.general.image_renderer,
# Color codes
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"PREFIX": "airing-schedule",
"KEY": "",
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
preview_file = PREVIEWS_CACHE_DIR / "airing-schedule-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
# preview_script_final = f"{sys.executable} {preview_file} {{}}"
    # NOTE: disabled because the airing-schedule preview is not very useful
return ""
def get_dynamic_anime_preview(config: AppConfig) -> str:
@@ -393,17 +535,30 @@ def get_dynamic_anime_preview(config: AppConfig) -> str:
This is different from regular anime preview because:
1. We don't have media items upfront
2. The preview needs to work with search results as they come in
3. Preview is handled entirely in shell by parsing JSON results
3. Preview script dynamically loads data from search results JSON
Args:
config: Application configuration
Returns:
Preview script content for fzf dynamic search
Preview script command for fzf dynamic search
"""
# Ensure cache directories exist
IMAGES_CACHE_DIR.mkdir(parents=True, exist_ok=True)
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
source = FZF_SCRIPTS_DIR / "_ansi_utils.py"
dest = PREVIEWS_CACHE_DIR / "_ansi_utils.py"
if source.exists() and (
not dest.exists() or source.stat().st_mtime > dest.stat().st_mtime
):
try:
import shutil
shutil.copy2(source, dest)
logger.debug(f"Copied _ansi_utils.py to {INFO_CACHE_DIR}")
except Exception as e:
logger.warning(f"Failed to copy _ansi_utils.py to cache: {e}")
HEADER_COLOR = config.fzf.preview_header_color.split(",")
SEPARATOR_COLOR = config.fzf.preview_separator_color.split(",")
@@ -414,39 +569,34 @@ def get_dynamic_anime_preview(config: AppConfig) -> str:
search_cache_dir = APP_CACHE_DIR / "search"
search_results_file = search_cache_dir / "current_search_results.json"
# Prepare values to inject into the template
path_sep = "\\" if PLATFORM == "win32" else "/"
# Format the template with the dynamic values
# Prepare replacements for the template
replacements = {
"PREVIEW_MODE": config.general.preview,
"IMAGE_CACHE_PATH": str(IMAGES_CACHE_DIR),
"INFO_CACHE_PATH": str(INFO_CACHE_DIR),
"PATH_SEP": path_sep,
"IMAGE_RENDERER": config.general.image_renderer,
"SEARCH_RESULTS_FILE": str(search_results_file),
# Color codes
"C_TITLE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_KEY": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_VALUE": ansi.get_true_fg(HEADER_COLOR, bold=True),
"C_RULE": ansi.get_true_fg(SEPARATOR_COLOR, bold=True),
"RESET": ansi.RESET,
"SCALE_UP": " --scale-up" if config.general.preview_scale_up else "",
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"PREVIEW_MODE": config.general.preview,
"IMAGE_RENDERER": config.general.image_renderer,
"HEADER_COLOR": ",".join(HEADER_COLOR),
"SEPARATOR_COLOR": ",".join(SEPARATOR_COLOR),
"SCALE_UP": str(config.general.preview_scale_up),
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
# Write the preview script to cache
preview_file = PREVIEWS_CACHE_DIR / "dynamic-search-preview-script.py"
preview_file.write_text(preview_script, encoding="utf-8")
# Return the command to execute the preview script
preview_script_final = f"{sys.executable} {preview_file} {{}}"
return preview_script_final
def _get_preview_manager() -> PreviewWorkerManager:
"""Get or create the global preview worker manager."""
global _preview_manager
if _preview_manager is None:
_preview_manager = PreviewWorkerManager(
IMAGES_CACHE_DIR, INFO_CACHE_DIR, REVIEWS_CACHE_DIR
)
_preview_manager = PreviewWorkerManager(IMAGES_CACHE_DIR, INFO_CACHE_DIR)
return _preview_manager
@@ -470,111 +620,3 @@ def get_preview_worker_status() -> dict:
if _preview_manager:
return _preview_manager.get_status()
return {"preview_worker": None, "episode_worker": None}
def get_review_preview(choice_map: Dict[str, MediaReview], config: AppConfig) -> str:
"""
Generate the generic loader script for review previews and start background caching.
"""
REVIEWS_CACHE_DIR.mkdir(parents=True, exist_ok=True)
preview_manager = _get_preview_manager()
worker = preview_manager.get_review_worker()
worker.cache_review_previews(choice_map, config)
logger.debug("Started background caching for review previews")
# Use the generic loader script
preview_script = TEMPLATE_REVIEW_PREVIEW_SCRIPT
path_sep = "\\" if PLATFORM == "win32" else "/"
# Inject the correct cache path and color codes
replacements = {
"PREVIEW_MODE": config.general.preview,
"INFO_CACHE_DIR": str(REVIEWS_CACHE_DIR),
"PATH_SEP": path_sep,
"C_TITLE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_KEY": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_VALUE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_RULE": ansi.get_true_fg(
config.fzf.preview_separator_color.split(","), bold=True
),
"RESET": ansi.RESET,
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
def get_character_preview(choice_map: Dict[str, Character], config: AppConfig) -> str:
"""
Generate the generic loader script for character previews and start background caching.
"""
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
preview_manager = _get_preview_manager()
worker = preview_manager.get_character_worker()
worker.cache_character_previews(choice_map, config)
logger.debug("Started background caching for character previews")
# Use the generic loader script
preview_script = TEMPLATE_CHARACTER_PREVIEW_SCRIPT
path_sep = "\\" if PLATFORM == "win32" else "/"
# Inject the correct cache path and color codes
replacements = {
"PREVIEW_MODE": config.general.preview,
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"IMAGE_CACHE_DIR": str(IMAGES_CACHE_DIR),
"PATH_SEP": path_sep,
"C_TITLE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_KEY": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_VALUE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_RULE": ansi.get_true_fg(
config.fzf.preview_separator_color.split(","), bold=True
),
"RESET": ansi.RESET,
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script
def get_airing_schedule_preview(
schedule_result: AiringScheduleResult, config: AppConfig, anime_title: str = "Anime"
) -> str:
"""
Generate the generic loader script for airing schedule previews and start background caching.
"""
INFO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
preview_manager = _get_preview_manager()
worker = preview_manager.get_airing_schedule_worker()
worker.cache_airing_schedule_preview(anime_title, schedule_result, config)
logger.debug("Started background caching for airing schedule previews")
# Use the generic loader script
preview_script = TEMPLATE_AIRING_SCHEDULE_PREVIEW_SCRIPT
path_sep = "\\" if PLATFORM == "win32" else "/"
# Inject the correct cache path and color codes
replacements = {
"PREVIEW_MODE": config.general.preview,
"INFO_CACHE_DIR": str(INFO_CACHE_DIR),
"PATH_SEP": path_sep,
"C_TITLE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_KEY": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_VALUE": ansi.get_true_fg(config.fzf.header_color.split(","), bold=True),
"C_RULE": ansi.get_true_fg(
config.fzf.preview_separator_color.split(","), bold=True
),
"RESET": ansi.RESET,
}
for key, value in replacements.items():
preview_script = preview_script.replace(f"{{{key}}}", value)
return preview_script

View File

@@ -6,6 +6,7 @@ including image downloads and info text generation with proper lifecycle managem
"""
import logging
from pathlib import Path
from typing import Dict, List, Optional
import httpx
@@ -31,20 +32,20 @@ logger = logging.getLogger(__name__)
FZF_SCRIPTS_DIR = SCRIPTS_DIR / "fzf"
TEMPLATE_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "info.template.sh").read_text(
TEMPLATE_MEDIA_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "media_info.py").read_text(
encoding="utf-8"
)
TEMPLATE_EPISODE_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "episode-info.template.sh").read_text(
TEMPLATE_EPISODE_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "episode_info.py").read_text(
encoding="utf-8"
)
TEMPLATE_REVIEW_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "review-info.template.sh").read_text(
TEMPLATE_REVIEW_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "review_info.py").read_text(
encoding="utf-8"
)
TEMPLATE_CHARACTER_INFO_SCRIPT = (FZF_SCRIPTS_DIR / "character_info.py").read_text(
encoding="utf-8"
)
TEMPLATE_CHARACTER_INFO_SCRIPT = (
FZF_SCRIPTS_DIR / "character-info.template.sh"
).read_text(encoding="utf-8")
TEMPLATE_AIRING_SCHEDULE_INFO_SCRIPT = (
FZF_SCRIPTS_DIR / "airing-schedule-info.template.sh"
FZF_SCRIPTS_DIR / "airing_schedule_info.py"
).read_text(encoding="utf-8")
@@ -103,29 +104,29 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
raise RuntimeError("PreviewCacheWorker is not running")
for media_item, title_str in zip(media_items, titles):
hash_id = self._get_cache_hash(title_str)
selection_title = self._get_selection_title(title_str)
# Submit image download task if needed
if config.general.preview in ("full", "image") and media_item.cover_image:
image_path = self.images_cache_dir / f"{hash_id}.png"
image_path = self.images_cache_dir / f"{selection_title}.png"
if not image_path.exists():
self.submit_function(
self._download_and_save_image,
media_item.cover_image.large,
hash_id,
selection_title,
)
# Submit info generation task if needed
if config.general.preview in ("full", "text"):
info_text = self._generate_info_text(media_item, config)
self.submit_function(self._save_info_text, info_text, hash_id)
self.submit_function(self._save_info_text, info_text, selection_title)
def _download_and_save_image(self, url: str, hash_id: str) -> None:
def _download_and_save_image(self, url: str, selection_title: str) -> None:
"""Download an image and save it to cache."""
if not self._http_client:
raise RuntimeError("HTTP client not initialized")
image_path = self.images_cache_dir / f"{hash_id}.png"
image_path = self.images_cache_dir / f"{selection_title}.png"
try:
with self._http_client.stream("GET", url) as response:
@@ -135,7 +136,7 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
for chunk in response.iter_bytes():
f.write(chunk)
logger.debug(f"Successfully cached image: {hash_id}")
logger.debug(f"Successfully cached image: {selection_title}")
except Exception as e:
logger.error(f"Failed to download image {url}: {e}")
@@ -144,7 +145,7 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
def _generate_info_text(self, media_item: MediaItem, config: AppConfig) -> str:
"""Generate formatted info text for a media item."""
# Import here to avoid circular imports
info_script = TEMPLATE_INFO_SCRIPT
info_script = TEMPLATE_MEDIA_INFO_SCRIPT
description = formatter.clean_html(
media_item.description or "No description available."
)
@@ -159,11 +160,13 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
media_item.format.value if media_item.format else "UNKNOWN"
),
"NEXT_EPISODE": formatter.shell_safe(
f"Episode {media_item.next_airing.episode} on {formatter.format_date(media_item.next_airing.airing_at, '%A, %d %B %Y at %X)')}"
f"Episode {media_item.next_airing.episode} on {formatter.format_date(media_item.next_airing.airing_at, '%A, %d %B %Y at %X')}"
if media_item.next_airing
else "N/A"
),
"EPISODES": formatter.shell_safe(str(media_item.episodes)),
"EPISODES": formatter.shell_safe(
str(media_item.episodes) if media_item.episodes else "??"
),
"DURATION": formatter.shell_safe(
formatter.format_media_duration(media_item.duration)
),
@@ -190,7 +193,12 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
)
),
"SYNONYMNS": formatter.shell_safe(
formatter.format_list_with_commas(media_item.synonymns)
formatter.format_list_with_commas(
[media_item.title.romaji] + media_item.synonymns
if media_item.title.romaji
and media_item.title.romaji not in media_item.synonymns
else media_item.synonymns
)
),
"USER_STATUS": formatter.shell_safe(
media_item.user_status.status.value
@@ -216,22 +224,22 @@ class PreviewCacheWorker(ManagedBackgroundWorker):
return info_script
def _save_info_text(self, info_text: str, hash_id: str) -> None:
def _save_info_text(self, info_text: str, selection_title: str) -> None:
"""Save info text to cache."""
try:
info_path = self.info_cache_dir / hash_id
info_path = self.info_cache_dir / f"{selection_title}.py"
with AtomicWriter(info_path) as f:
f.write(info_text)
logger.debug(f"Successfully cached info: {hash_id}")
logger.debug(f"Successfully cached info: {selection_title}")
except IOError as e:
logger.error(f"Failed to write info cache for {hash_id}: {e}")
logger.error(f"Failed to write info cache for {selection_title}: {e}")
raise
def _get_cache_hash(self, text: str) -> str:
def _get_selection_title(self, text: str) -> str:
"""Generate a cache hash for the given text."""
from hashlib import sha256
return sha256(text.encode("utf-8")).hexdigest()
return f"search-result-{sha256(text.encode('utf-8')).hexdigest()}"
def _on_task_completed(self, task: WorkerTask, future) -> None:
"""Handle task completion with enhanced logging."""
@@ -301,7 +309,7 @@ class EpisodeCacheWorker(ManagedBackgroundWorker):
for episode_str in episodes:
hash_id = self._get_cache_hash(
f"{media_item.title.english}_Episode_{episode_str}"
f"{media_item.title.english.replace(formatter.DOUBLE_QUOTE, formatter.SINGLE_QUOTE)}-{episode_str}"
)
# Find episode data
@@ -352,7 +360,7 @@ class EpisodeCacheWorker(ManagedBackgroundWorker):
replacements = {
"TITLE": formatter.shell_safe(title),
"NEXT_EPISODE": formatter.shell_safe(
f"Episode {media_item.next_airing.episode} on {formatter.format_date(media_item.next_airing.airing_at, '%A, %d %B %Y at %X)')}"
f"Episode {media_item.next_airing.episode} on {formatter.format_date(media_item.next_airing.airing_at, '%A, %d %B %Y at %X')}"
if media_item.next_airing
else "N/A"
),
@@ -385,7 +393,7 @@ class EpisodeCacheWorker(ManagedBackgroundWorker):
def _save_info_text(self, info_text: str, hash_id: str) -> None:
"""Save episode info text to cache."""
try:
info_path = self.info_cache_dir / hash_id
info_path = self.info_cache_dir / (hash_id + ".py")
with AtomicWriter(info_path) as f:
f.write(info_text)
logger.debug(f"Successfully cached episode info: {hash_id}")
@@ -397,7 +405,7 @@ class EpisodeCacheWorker(ManagedBackgroundWorker):
"""Generate a cache hash for the given text."""
from hashlib import sha256
return sha256(text.encode("utf-8")).hexdigest()
return "episode-" + sha256(text.encode("utf-8")).hexdigest()
def _on_task_completed(self, task: WorkerTask, future) -> None:
"""Handle task completion with enhanced logging."""
@@ -414,9 +422,12 @@ class ReviewCacheWorker(ManagedBackgroundWorker):
Specialized background worker for caching fully-rendered media review previews.
"""
def __init__(self, reviews_cache_dir, max_workers: int = 10):
def __init__(
self, images_cache_dir: Path, info_cache_dir: Path, max_workers: int = 10
):
super().__init__(max_workers=max_workers, name="ReviewCacheWorker")
self.reviews_cache_dir = reviews_cache_dir
self.images_cache_dir = images_cache_dir
self.info_cache_dir = info_cache_dir
def cache_review_previews(
self, choice_map: Dict[str, MediaReview], config: AppConfig
@@ -464,7 +475,7 @@ class ReviewCacheWorker(ManagedBackgroundWorker):
def _save_preview_content(self, content: str, hash_id: str) -> None:
"""Saves the final preview content to the cache."""
try:
info_path = self.reviews_cache_dir / hash_id
info_path = self.info_cache_dir / hash_id
with AtomicWriter(info_path) as f:
f.write(content)
logger.debug(f"Successfully cached review preview: {hash_id}")
@@ -475,7 +486,7 @@ class ReviewCacheWorker(ManagedBackgroundWorker):
def _get_cache_hash(self, text: str) -> str:
from hashlib import sha256
return sha256(text.encode("utf-8")).hexdigest()
return "review-" + sha256(text.encode("utf-8")).hexdigest() + ".py"
def _on_task_completed(self, task: WorkerTask, future) -> None:
super()._on_task_completed(task, future)
@@ -610,7 +621,7 @@ class CharacterCacheWorker(ManagedBackgroundWorker):
def _get_cache_hash(self, text: str) -> str:
from hashlib import sha256
return sha256(text.encode("utf-8")).hexdigest()
return "character-" + sha256(text.encode("utf-8")).hexdigest() + ".py"
def _on_task_completed(self, task: WorkerTask, future) -> None:
super()._on_task_completed(task, future)
@@ -734,7 +745,7 @@ class AiringScheduleCacheWorker(ManagedBackgroundWorker):
def _get_cache_hash(self, text: str) -> str:
from hashlib import sha256
return sha256(text.encode("utf-8")).hexdigest()
return "airing-schedule-" + sha256(text.encode("utf-8")).hexdigest() + ".py"
def _on_task_completed(self, task: WorkerTask, future) -> None:
super()._on_task_completed(task, future)
@@ -750,7 +761,7 @@ class PreviewWorkerManager:
caching workers with automatic lifecycle management.
"""
def __init__(self, images_cache_dir, info_cache_dir, reviews_cache_dir):
def __init__(self, images_cache_dir, info_cache_dir):
"""
Initialize the preview worker manager.
@@ -761,7 +772,6 @@ class PreviewWorkerManager:
"""
self.images_cache_dir = images_cache_dir
self.info_cache_dir = info_cache_dir
self.reviews_cache_dir = reviews_cache_dir
self._preview_worker: Optional[PreviewCacheWorker] = None
self._episode_worker: Optional[EpisodeCacheWorker] = None
self._review_worker: Optional[ReviewCacheWorker] = None
@@ -805,7 +815,9 @@ class PreviewWorkerManager:
# Clean up old worker
thread_manager.shutdown_worker("review_cache_worker")
self._review_worker = ReviewCacheWorker(self.reviews_cache_dir)
self._review_worker = ReviewCacheWorker(
self.images_cache_dir, self.info_cache_dir
)
self._review_worker.start()
thread_manager.register_worker("review_cache_worker", self._review_worker)

View File

@@ -178,7 +178,9 @@ class GeneralConfig(BaseModel):
description=desc.GENERAL_SCALE_PREVIEW,
)
image_renderer: Literal["icat", "chafa", "imgcat"] = Field(
image_renderer: Literal[
"icat", "chafa", "imgcat", "system-sixels", "system-kitty", "system-default"
] = Field(
default_factory=defaults.GENERAL_IMAGE_RENDERER,
description=desc.GENERAL_IMAGE_RENDERER,
)

View File

@@ -5,6 +5,8 @@ from typing import Dict, List, Optional, Union
from ...libs.media_api.types import AiringSchedule
COMMA_REGEX = re.compile(r"([0-9]{3})(?=\d)")
SINGLE_QUOTE = "'"
DOUBLE_QUOTE = '"'
def format_media_duration(total_minutes: Optional[int]) -> str: