Merge branch 'development'
Euro20179 committed Jun 29, 2022
2 parents 9349dc5 + 1b17caa commit 4d6bb1f

Showing 14 changed files with 755 additions and 295 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -66,6 +66,7 @@ There are only 2 required dependencies, however the rest require some configurat
| [`catimg`](https://github.com/posva/catimg) | ✅ |
| [`w3m`](https://github.com/tats/w3m) (buggy) | ❌ |
| [`imv`](https://git.sr.ht/~exec64/imv) | ✅ |
| [`mpv`](https://github.com/mpv-player/mpv) | ✅ |
| [`kitty`](https://github.com/kovidgoyal/kitty) | ✅ |
| [`swayimg`](https://github.com/artemsen/swayimg) | only on `sway` |

16 changes: 16 additions & 0 deletions addons/extensions/ani-url-handler
@@ -0,0 +1,16 @@
open_url_handler () {
urls="$(tr '\n' ' ' < "$1")"

set -f
IFS=" "
set -- $urls
[ -z "$*" ] && return 0
unset IFS

idx="$(jq -r --arg url "$1" '.[]|select(.url==$url).idx' < "$ytfzf_video_json_file")"
referrer="$(jq -r --arg url "$1" '.[]|select(.url==$url).dpage' < "$ytfzf_video_json_file")"

url_handler_opts="--vid=$idx --referrer=$referrer"

printf "%s\t" "$ytdl_pref" "$is_audio_only" "$is_detach" "$video_pref" "$audio_pref" | session_temp_dir="${session_temp_dir}" session_cache_dir="${session_cache_dir}" "$url_handler" "$@"
}
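
The url handler invoked on the last line receives ytdl_pref, is_audio_only, is_detach, video_pref and audio_pref tab-separated on stdin, gets the selected URLs as positional arguments, and can pick up the --vid/--referrer pair from url_handler_opts. A minimal sketch of a compatible handler; the mpv call is illustrative only and not part of this commit:

# hedged sketch: read the tab-separated preferences piped in by open_url_handler,
# then play the selected urls ("$@")
example_handler () {
    IFS="$(printf '\t')" read -r ytdl_pref is_audio_only is_detach video_pref audio_pref
    # assumption: mpv as the player; ytfzf itself may invoke something different
    mpv --ytdl-format="${ytdl_pref:-best}" "$@"
}
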
3 changes: 3 additions & 0 deletions addons/extensions/auto-play
@@ -2,7 +2,9 @@

auto_play_handler () {
_url="$1"
stop_code=4
$user_url_handler "$@"
if [ $? -eq $stop_code ]; then return 0; fi
case "$_url" in
*/*) id="${_url##*=}" ;;
*) id="$_url" ;;
@@ -15,6 +17,7 @@ auto_play_handler () {
id="${id##*=}"
set -f
$user_url_handler $link
if [ $? -eq $stop_code ]; then break; fi
done
}

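The stop_code check added here lets a url handler halt auto-play: if the handler exits with status 4, auto_play_handler stops queueing further videos. A hedged sketch of such a handler; the mpv call is only an example:

# returning the stop code (4) after a failed/aborted playback ends auto-play
my_stop_handler () {
    mpv "$@" || return 4
}
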
4 changes: 3 additions & 1 deletion addons/extensions/gui
@@ -19,7 +19,9 @@ vars=$(MAIN_DIALOG="

while read -r line; do
case "$line" in
search*) initial_search=${line#*=} ;;
search*)
initial_search=$(printf "%s" "$line" | tr -d '"')
initial_search="${initial_search#*=}" ;;
EXIT*) break ;;
esac
done <<EOF
20 changes: 14 additions & 6 deletions addons/interfaces/gui
@@ -1,6 +1,6 @@
#!/bin/sh

: "${YTFZF_INTERFACE_GUI_CSS:=$YTFZF_CONFIG_DIR/interface-gui.css}"
: "${YTFZF_GUI_CSS:=$YTFZF_CONFIG_DIR/interface-gui.css}"

gui_dialog () {
css_file="${session_temp_dir}/gtk.css"
@@ -10,6 +10,7 @@ gui_dialog () {
: > "$css_file"
fi
MAIN_DIALOG="<window><vbox scrollable=\"true\" vscrollbar-policy=\"0\">"
download_thumbnails $(get_missing_thumbnails)
while read -r line; do
url="${line##*|}"
_correct_json="$(jq -r --arg url "$url" '[.[]|select(.url==$url)]|unique_by(.ID)[0]' < "$video_json_file")"
@@ -21,6 +22,14 @@
scraper="$(get_video_json_attr "scraper")"
duration="$(get_video_json_attr "duration")"
description="$(get_video_json_attr "description" | sed 's/\\n/\n/g')"

unset IFS

for path in "${YTFZF_CUSTOM_THUMBNAILS_DIR}/$id.jpg" "${thumb_dir}/${id}.jpg" "${YTFZF_CUSTOM_THUMBNAILS_DIR}/YTFZF:DEFAULT.jpg"; do
thumb_path="$path"
[ -f "${thumb_path}" ] && break
done

MAIN_DIALOG="$MAIN_DIALOG
<hbox>
<vbox>
@@ -41,13 +50,12 @@
</button>
</vbox>
<pixmap name=\"Thumbnail\">
<input file>$thumb_dir/$id.jpg</input>
<width>400</width>
<input file>$thumb_path</input>
</pixmap>
</hbox>"
done
unset IFS
download_thumbnails $(get_missing_thumbnails)
MAIN_DIALOG="$MAIN_DIALOG<width>100</width><height>100</height></vbox></window>" gtkdialog --styles="$css_file"
MAIN_DIALOG="$MAIN_DIALOG<width>750</width><height>500</height></vbox></window>" gtkdialog --styles="$css_file"
}

interface_gui () {
@@ -62,6 +70,6 @@ interface_gui () {
}

on_opt_parse_i_gui_help (){
print_info "YTFZF_INTERFACE_GUI_CSS is a file for custom css for the window\nselectors:\n#TitleText\n#ViewsText\n#DateText\n#DurationText\n#UrlButton\n#Thumbnail\n"
print_info "YTFZF_GUI_CSS is a file for custom css for the window\nselectors:\n#TitleText\n#ViewsText\n#DateText\n#DurationText\n#UrlButton\n#Thumbnail\n"
exit 0
}
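
The variable is renamed from YTFZF_INTERFACE_GUI_CSS to YTFZF_GUI_CSS and still defaults to interface-gui.css in the config directory; the selectors it can style are listed in the help text above. A hedged example that writes a minimal stylesheet to that default location (the config-path fallback and the style rules are assumptions, not part of this commit):

# hedged example: create a stylesheet for the gtkdialog interface
cat > "${YTFZF_CONFIG_DIR:-$HOME/.config/ytfzf}/interface-gui.css" <<'EOF'
#TitleText    { font-weight: bold; }
#DurationText { color: #888888; }
#UrlButton    { padding: 2px; }
EOF
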
4 changes: 3 additions & 1 deletion addons/scrapers/ani
@@ -1,5 +1,7 @@
#!/bin/sh

submenu_opts="$submenu_opts --ext=ani-url-handler"

_ani_get_categories () {
#stolen from pystardust/ani-cli
sed -n 's_^[[:space:]]*<a href="/category/\([^"]*\)" title="\([^"]*\)".*_\1_p' "$1"
@@ -13,7 +15,7 @@ _ani_get_thumbnails () {
scrape_ani () {
search="$1"
print_info "If you like this, you should check out https://github.com/pystardust/ani-cli, as it is dedicated for anime!\nPlus that team does all the hard work\n"
[ "$search" = ":help" ] && print_info "Search gogoanime for an anime (currently broken)\n" && return 100
[ "$search" = ":help" ] && print_info "Search gogoanime for an anime\nYou can use the --pages-start, and --pages options to control which episodes to scrape" && return 100
output_json_file="$2"
search="$(printf "%s" "$search" | tr '[[:space:]]' '-')"
#their url could move a lot
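
With submenu_opts now carrying --ext=ani-url-handler, episodes opened from the ani submenu automatically go through the new referrer-aware url handler. A hedged usage example; the -c scraper-selection flag is an assumption:

# search gogoanime, pick a show, then pick episodes in the submenu
ytfzf -c ani "one piece"
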
94 changes: 68 additions & 26 deletions addons/scrapers/ani-category
@@ -2,21 +2,64 @@

#stolen from pystardust/ani-cli
_decrypt_link() {
ajax_url="https://gogoplay4.com/encrypt-ajax.php"
ajax_url="https://goload.pro/encrypt-ajax.php"
id=$(printf "%s" "$1" | sed -nE 's/.*id=(.*)&title.*/\1/p')
resp=$(curl -s "$1")
secret_key=$(printf "%s" "$resp" | sed -nE 's/.*class="container-(.*)">/\1/p' | tr -d "\n" | od -A n -t x1 | tr -d " |\n")
iv=$(printf "%s" "$resp" | sed -nE 's/.*class="wrapper container-(.*)">/\1/p' | tr -d "\n" | od -A n -t x1 | tr -d " |\n")
second_key=$(printf "%s" "$resp" | sed -nE 's/.*class=".*videocontent-(.*)">/\1/p' | tr -d "\n" | od -A n -t x1 | tr -d " |\n")
token=$(printf "%s" "$resp" | sed -nE 's/.*data-value="(.*)">.*/\1/p' | base64 -d | openssl enc -d -aes256 -K "$secret_key" -iv "$iv" | sed -nE 's/.*&(token.*)/\1/p')
ajax=$(printf '%s' "$id" | openssl enc -e -aes256 -K "$secret_key" -iv "$iv" | base64)
data=$(curl -s -H "X-Requested-With:XMLHttpRequest" "${ajax_url}?id=${ajax}&alias=${id}&${token}" | sed -e 's/{"data":"//' -e 's/"}/\n/' -e 's/\\//g')
printf '%s' "$data" | base64 -d | openssl enc -d -aes256 -K "$second_key" -iv "$iv" | sed -e 's/\].*/\]/' -e 's/\\//g' |
grep -Eo 'https:\/\/[-a-zA-Z0-9@:%._\+~#=][a-zA-Z0-9][-a-zA-Z0-9@:%_\+.~#?&\/\/=]*'
}

#get the id from the url
id=$(printf "%s" "$1" | sed -nE 's/.*id=(.*)&title.*/\1/p')
_get_video_link () {
dpage_url="$1"
video_links=$(_decrypt_link "$dpage_url")
if printf '%s' "$video_links" | grep -q "mp4"; then
video_url=$(printf "%s" "$video_links" | head -n 4 | tail -n 1)
idx=1
else
video_url="$video_links"
_get_video_quality_m3u8
fi

#construct ajax parameters
secret_key='3633393736383832383733353539383139363339393838303830383230393037'
iv='34373730343738393639343138323637'
ajax=$(echo $id|openssl enc -e -aes256 -K "$secret_key" -iv "$iv" | base64)
}

#send the request to the ajax url
data=$(curl -s -H "X-Requested-With:XMLHttpRequest" "$ajax_url" -d "id=$ajax" | sed -e 's/{"data":"//' -e 's/"}/\n/' -e 's/\\//g')
_get_video_quality_mp4() {
case $quality in
best)
video_url=$(printf '%s' "$1" | head -n 4 | tail -n 1) ;;
worst)
video_url=$(printf '%s' "$1" | head -n 1) ;;
*)
video_url=$(printf '%s' "$1" | grep -i "${quality}p" | head -n 1)
if [ -z "$video_url" ]; then
err "Current video quality is not available (defaulting to best quality)"
quality=best
video_url=$(printf '%s' "$1" | head -n 4 | tail -n 1)
fi
;;
esac
printf '%s' "$video_url"
}

printf '%s' "$data" | base64 -d | openssl enc -d -aes256 -K "$secret_key" -iv "$iv" | sed -e 's/\].*/\]/' -e 's/\\//g' |
grep -Eo 'https:\/\/[-a-zA-Z0-9@:%._\+~#=][a-zA-Z0-9][-a-zA-Z0-9@:%_\+.~#?&\/\/=]*'
_get_video_quality_m3u8() {
case $quality in
worst|360)
idx=2 ;;
480)
idx=3 ;;
720)
idx=4 ;;
1080|best)
idx=5 ;;
*)
idx=5 ;;
esac
printf '%s' "$video_url" | grep -qE "gogocdn.*m3u.*" && idx=$((idx-1))
}

#stolen from pystardust/ani-cli
@@ -32,13 +75,8 @@ _get_dpage_link () {
# get the download page url
anime_id="$1"
ep_no="$2"
# credits to fork: https://github.com/Dink4n/ani-cli for the fix
for params in "-episode-$ep_no" "-$ep_no" "-episode-$ep_no-1" "-camrip-episode-$ep_no"; do
anime_page=$(curl -s "$base_url/$anime_id$params")
printf '%s' "$anime_page" | grep -q '<h1 class="entry-title">404</h1>' || break
done
printf '%s' "$anime_page" |
sed -n -E 's/.*class="active" rel="1" data-video="([^"]*)".*/\1/p' | sed 's/^/https:/g'
curl -s "https://goload.pro/videos/${anime_id}-episode-${ep_no}" | sed -nE 's_^[[:space:]]*<iframe src="([^"]*)".*_\1_p' |
sed 's/^/https:/g'
}

scrape_ani_category () {
@@ -50,19 +88,23 @@ scrape_ani_category () {
_tmp_html="${session_temp_dir}/ani-category.html"
_get_request "$base_url/category/$search" > "$_tmp_html"
episode_count="$(_ani_category_get_episodes "$_tmp_html")"

ep_start=${pages_start:-1}
[ "$pages_to_scrape" -eq 1 ] && ep_max="$episode_count" || ep_max="$((ep_start + pages_to_scrape))"
[ "$ep_max" -gt "$episode_count" ] && ep_max=$episode_count

command_exists "openssl" || die 3 "openssl is a required dependency for ani, please install it\n"
i=1
_start_series_of_threads
while [ $i -le "$episode_count" ]; do
while [ $ep_start -le "$ep_max" ]; do
{
print_info "Scraping anime episode $i\n"
_tmp_json="${session_temp_dir}/ani-category-$i.json.final"
print_info "Scraping anime episode $ep_start\n"
_tmp_json="${session_temp_dir}/ani-category-$ep_start.json.final"
#stolen from pystardust/ani-cli
dpage_link=$(_get_dpage_link "$search" "$i")
url="$(_decrypt_link "$dpage_link" | head -n 4 | tail -n 1)"
echo "[]" | jq --arg url "$url" --arg title "$search episode $i" '[{"url": $url, "title": $title, "ID": $title}]' > "$_tmp_json"
dpage_link=$(_get_dpage_link "$search" "$ep_start")
_get_video_link "$dpage_link"
echo "[]" | jq --arg idx "$idx" --arg dpage "$dpage_link" --arg url "$video_url" --arg title "$search episode $ep_start" '[{"url": $url, "title": $title, "ID": $title, "idx": $idx, "dpage": $dpage}]' > "$_tmp_json"
} &
: $((i+=1))
: $((ep_start+=1))
_thread_started "$!"
done
wait
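
The loop now honours --pages-start and --pages instead of always walking every episode: ep_start defaults to 1, and ep_max is either the full episode count (when --pages is left at 1) or ep_start plus the page count, capped at the episode count. A short worked example with made-up numbers:

# hedged walk-through of the range logic above:
#   episode_count=12, pages_start=5, pages_to_scrape=3
#   ep_start = 5
#   ep_max   = 5 + 3 = 8   (pages_to_scrape is not 1, and 8 <= 12)
# so episodes 5 through 8 are scraped, each in its own background thread
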
47 changes: 47 additions & 0 deletions addons/scrapers/feed-url
@@ -0,0 +1,47 @@
#!/bin/sh

handle_link () {
link=$1
domain="${link#https://}"
domain="${domain%%/*}"
case "$domain" in
#invidious list found here: https://docs.invidious.io/instances
*youtube*|*invidious*|vid.puffyan.us|yewtu.be|inv.riverside.rocks|yt.artemislena.eu|tube.cthd.icu)
link=$(_get_real_channel_link "$1")
final="https://www.youtube.com/feeds/videos.xml?channel_id=${link##*/}"
;;
*reddit*) final="$link/.rss" ;;
esac
}

scrape_feed_url () {
IFS=" "
if [ "$1" = ":help" ]; then
printf "%s\n" "Get the rss feed for something
supported searches:
youtube/<channel-name>
<link-to-youtube-channel>
r/subreddit
<link-to-subreddit>
example searches:
youtube/pewdiepie
r/linux"
return 100
fi
for link in $1; do
case "$link" in
r/*) final="https://www.reddit.com/r/${link#r/}/.rss" ;;
youtube/*)
link="https://www.youtube.com/user/${link#youtube/}"
handle_link "$link"
;;
*)
handle_link "$link" ;;
esac
#i honestly don't even know how it's possible that non-printable characters end up here
final="$(printf "%s" "$final" | sed 's/[^[:print:]]//g')"
printf "%s\n" "$final"
_get_request "$final" -L --head --silent -f > /dev/null|| print_warning "Warning: $final, does not appear to be a real url\n"
done
exit 0
}
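
scrape_feed_url turns each search term into an RSS/Atom feed URL, prints it, and HEAD-requests it as a sanity check. Hedged examples of what the searches from the :help text resolve to; the -c scraper-selection flag is an assumption:

# r/<subreddit> maps straight to the subreddit's .rss feed
ytfzf -c feed-url "r/linux"
#   -> https://www.reddit.com/r/linux/.rss
# youtube/<name> is resolved to the channel id first
ytfzf -c feed-url "youtube/pewdiepie"
#   -> https://www.youtube.com/feeds/videos.xml?channel_id=<resolved channel id>
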
49 changes: 49 additions & 0 deletions addons/scrapers/yt-music
@@ -0,0 +1,49 @@
#!/bin/sh

_yt_music_get_playlist_json () {
jq '..|.musicResponsiveListItemRenderer?|select(.!=null)|select(..|.musicResponsiveListItemFlexColumnRenderer?.text.runs|length == 5)
| {
title: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].text,
duration: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[-1].text,
url: "https://music.youtube.com/playlist?list=\(.menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId)",
thumbs: .thumbnail.musicThumbnailRenderer.thumbnail.thumbnails[-1].url,
action: "scrape type=yt-music-playlist search=https://music.youtube.com/playlist?list=\(.menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId)",
ID: .menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId }' | jq '[inputs]'
}

_yt_music_get_song_json () {
jq '..|.musicResponsiveListItemRenderer?|select(.!=null)|select(..|.musicResponsiveListItemFlexColumnRenderer?.text.runs|length == 7)
| {
title: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].text,
channel: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[2].text,
views: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[4].text,
duration: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[-1].text,
url: "https://music.youtube.com/watch?v=\(.flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].navigationEndpoint.watchEndpoint.videoId)",
ID: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].navigationEndpoint.watchEndpoint.videoId,
thumbs: .thumbnail.musicThumbnailRenderer.thumbnail.thumbnails[-1].url}' | jq '[inputs]'
}

scrape_yt_music () {
search="$1"
output_json_file="$2"
_tmp_html="${session_temp_dir}/yt-music.html"
_tmp_json="${session_temp_dir}/yt-music.json"
url="https://music.youtube.com/search"
_get_request "$url" -G --data-urlencode "q=$search" > "$_tmp_html"

if [ -f $YTFZF_CUSTOM_SCRAPERS_DIR/yt-music-utils/convert-ascii-escape.pl ]; then
utils_path=$YTFZF_CUSTOM_SCRAPERS_DIR/yt-music-utils/convert-ascii-escape.pl
elif [ -f "$YTFZF_SYSTEM_ADDON_DIR"/scrapers/yt-music-utils/convert-ascii-escape.pl ]; then
utils_path="$YTFZF_SYSTEM_ADDON_DIR"/scrapers/yt-music-utils/convert-ascii-escape.pl
else
print_error "The convert-ascii-escape.pl file could not be found\n"
exit 1
fi


sed -n "s/.*data: '\([^']*\)'.*/\1/p" < "$_tmp_html" | "$utils_path" > "$_tmp_json"
{
_yt_music_get_playlist_json < "$_tmp_json"
_yt_music_get_song_json < "$_tmp_json"
} > "$output_json_file"
}
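
The scraper fetches the music.youtube.com search page, decodes the embedded data blob with convert-ascii-escape.pl, and emits playlist and song entries as JSON; playlist entries carry an action field that hands them off to the yt-music-playlist scraper when selected. A hedged usage example; the -c flag and the audio-only -m flag are assumptions:

# search YouTube Music and play the selection as audio only
ytfzf -m -c yt-music "daft punk"
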
45 changes: 45 additions & 0 deletions addons/scrapers/yt-music-playlist
@@ -0,0 +1,45 @@
#!/bin/sh
_yt_music_get_playlist_json () {
jq '..|.musicResponsiveListItemRenderer?|select(.!=null)|select(..|.musicResponsiveListItemFlexColumnRenderer?.text.runs|length == 5)
| {
title: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].text,
duration: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[-1].text,
url: "https://music.youtube.com/playlist?list=\(.menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId)",
thumbs: .thumbnail.musicThumbnailRenderer.thumbnail.thumbnails[-1].url,
action: "scrape type=yt-music-playlist search=https://music.youtube.com/playlist?list=\(.menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId)",
ID: .menu.menuRenderer.items[0].menuNavigationItemRenderer.navigationEndpoint.watchPlaylistEndpoint.playlistId }' | jq '[inputs]'
}

_yt_music_get_song_json () {
jq '..|.musicResponsiveListItemRenderer?|select(.!=null)
| {
title: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].text,
channel: .flexColumns[1].musicResponsiveListItemFlexColumnRenderer.text.runs[-1].text,
duration: .fixedColumns[0].musicResponsiveListItemFixedColumnRenderer.text.runs[0].text,
url: "https://music.youtube.com/watch?v=\(.flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].navigationEndpoint.watchEndpoint.videoId)",
ID: .flexColumns[0].musicResponsiveListItemFlexColumnRenderer.text.runs[0].navigationEndpoint.watchEndpoint.videoId,
thumbs: .thumbnail.musicThumbnailRenderer.thumbnail.thumbnails[-1].url}' | jq '[inputs]'
}

scrape_yt_music_playlist () {
search="$1"
output_json_file="$2"
_tmp_html="${session_temp_dir}/yt-music-playlist.html"
_tmp_json="${session_temp_dir}/yt-music-playlist.json"
_get_request "$search" > "$_tmp_html"

if [ -f $YTFZF_CUSTOM_SCRAPERS_DIR/yt-music-utils/convert-ascii-escape.pl ]; then
utils_path=$YTFZF_CUSTOM_SCRAPERS_DIR/yt-music-utils/convert-ascii-escape.pl
elif [ -f "$YTFZF_SYSTEM_ADDON_DIR"/scrapers/yt-music-utils/convert-ascii-escape.pl ]; then
utils_path="$YTFZF_SYSTEM_ADDON_DIR"/scrapers/yt-music-utils/convert-ascii-escape.pl
else
print_error "The convert-ascii-escape.pl file could not be found\n"
exit 1
fi

sed -n "s/.*data: '\([^']*\)'.*/\1/p" < "$_tmp_html" | "$utils_path" > "$_tmp_json"
{
#_yt_music_get_playlist_json < "$_tmp_json"
_yt_music_get_song_json < "$_tmp_json"
} > "$output_json_file"
}