forked from minhngoc25a/yt-dlc
[openload] Fix extraction (closes #10408)
Thanks @yokrysty for the algorithm
This commit is contained in:
parent
fb009b7f53
commit
18b6216150
|
@@ -1,3 +1,9 @@
|
|||
version <unreleased>
|
||||
|
||||
Extractors
|
||||
* [openload] Fix extraction (#10408)
|
||||
|
||||
|
||||
version 2016.08.22
|
||||
|
||||
Core
|
||||
|
|
|
@@ -1,12 +1,12 @@
|
|||
# coding: utf-8
|
||||
from __future__ import unicode_literals, division
|
||||
|
||||
import math
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_chr
|
||||
from ..compat import (
|
||||
compat_chr,
|
||||
compat_ord,
|
||||
)
|
||||
from ..utils import (
|
||||
decode_png,
|
||||
determine_ext,
|
||||
ExtractorError,
|
||||
)
|
||||
|
@@ -42,71 +42,26 @@ class OpenloadIE(InfoExtractor):
|
|||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)
|
||||
|
||||
if 'File not found' in webpage:
|
||||
if 'File not found' in webpage or 'deleted by the owner' in webpage:
|
||||
raise ExtractorError('File not found', expected=True)
|
||||
|
||||
# The following extraction logic is proposed by @Belderak and @gdkchan
|
||||
# and declared to be used freely in youtube-dl
|
||||
# See https://github.com/rg3/youtube-dl/issues/9706
|
||||
# The following decryption algorithm is written by @yokrysty and
|
||||
# declared to be freely used in youtube-dl
|
||||
# See https://github.com/rg3/youtube-dl/issues/10408
|
||||
enc_data = self._html_search_regex(
|
||||
r'<span[^>]+id="hiddenurl"[^>]*>([^<]+)</span>', webpage, 'encrypted data')
|
||||
|
||||
numbers_js = self._download_webpage(
|
||||
'https://openload.co/assets/js/obfuscator/n.js', video_id,
|
||||
note='Downloading signature numbers')
|
||||
signums = self._search_regex(
|
||||
r'window\.signatureNumbers\s*=\s*[\'"](?P<data>[a-z]+)[\'"]',
|
||||
numbers_js, 'signature numbers', group='data')
|
||||
video_url_chars = []
|
||||
|
||||
linkimg_uri = self._search_regex(
|
||||
r'<img[^>]+id="linkimg"[^>]+src="([^"]+)"', webpage, 'link image')
|
||||
linkimg = self._request_webpage(
|
||||
linkimg_uri, video_id, note=False).read()
|
||||
for c in enc_data:
|
||||
j = compat_ord(c)
|
||||
if j >= 33 and j <= 126:
|
||||
j = ((j + 14) % 94) + 33
|
||||
video_url_chars += compat_chr(j)
|
||||
|
||||
width, height, pixels = decode_png(linkimg)
|
||||
|
||||
output = ''
|
||||
for y in range(height):
|
||||
for x in range(width):
|
||||
r, g, b = pixels[y][3 * x:3 * x + 3]
|
||||
if r == 0 and g == 0 and b == 0:
|
||||
break
|
||||
else:
|
||||
output += compat_chr(r)
|
||||
output += compat_chr(g)
|
||||
output += compat_chr(b)
|
||||
|
||||
img_str_length = len(output) // 200
|
||||
img_str = [[0 for x in range(img_str_length)] for y in range(10)]
|
||||
|
||||
sig_str_length = len(signums) // 260
|
||||
sig_str = [[0 for x in range(sig_str_length)] for y in range(10)]
|
||||
|
||||
for i in range(10):
|
||||
for j in range(img_str_length):
|
||||
begin = i * img_str_length * 20 + j * 20
|
||||
img_str[i][j] = output[begin:begin + 20]
|
||||
for j in range(sig_str_length):
|
||||
begin = i * sig_str_length * 26 + j * 26
|
||||
sig_str[i][j] = signums[begin:begin + 26]
|
||||
|
||||
parts = []
|
||||
# TODO: find better names for str_, chr_ and sum_
|
||||
str_ = ''
|
||||
for i in [2, 3, 5, 7]:
|
||||
str_ = ''
|
||||
sum_ = float(99)
|
||||
for j in range(len(sig_str[i])):
|
||||
for chr_idx in range(len(img_str[i][j])):
|
||||
if sum_ > float(122):
|
||||
sum_ = float(98)
|
||||
chr_ = compat_chr(int(math.floor(sum_)))
|
||||
if sig_str[i][j][chr_idx] == chr_ and j >= len(str_):
|
||||
sum_ += float(2.5)
|
||||
str_ += img_str[i][j][chr_idx]
|
||||
parts.append(str_.replace(',', ''))
|
||||
|
||||
video_url = 'https://openload.co/stream/%s~%s~%s~%s' % (parts[3], parts[1], parts[2], parts[0])
|
||||
video_url = 'https://openload.co/stream/%s?mime=true' % ''.join(video_url_chars)
|
||||
|
||||
title = self._og_search_title(webpage, default=None) or self._search_regex(
|
||||
r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
|
||||
|
|
Loading…
Reference in New Issue