#!/usr/bin/env python3
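# Scrape a YouTube watch page and download the best available progressive
# stream. Runs in two modes: as a CGI script (form plus streamed download)
# when SCRIPT_NAME is set in the environment, or as a command-line downloader
# with resume support when invoked directly.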

import cgi
import html.parser
import http.cookiejar
import json
import os
import re
import shutil
import subprocess
import sys
import time
import urllib.error
import urllib.parse
import urllib.request


USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:67.0) Gecko/20100101 Firefox/67.0"

MIMETYPES = {
    "video/mp4": "mp4",
    "video/x-flv": "flv",
    "video/3gpp": "3gp",
}

QUALITIES = {
    "hd1080": 5,
    "hd720": 4,
    "large": 3,
    "medium": 2,
    "small": 1,
}


class VideoUnavailable(Exception):
    pass

class NotYouTube(Exception):
    pass

def print_form(url="", msg=""):
    script_url = "https://%s%s" % (os.environ["HTTP_HOST"], os.environ["REQUEST_URI"])
    sys.stdout.write("Content-Type: text/html\r\n\r\n")
    sys.stdout.write("""
<!DOCTYPE html>
<html>
<head>
    <title>delx.net.au - YouTube Scraper</title>
    <link rel="stylesheet" type="text/css" href="/style.css">
    <style type="text/css">
        input[type="text"] {
            width: 100%;
        }
        .error {
            color: red;
        }
    </style>
</head>
<body>
<h1>delx.net.au - YouTube Scraper</h1>
{0}
<form action="" method="get">
<p>This page will let you easily download YouTube videos to watch offline. It
will automatically grab the highest quality version.</p>
<div><input type="text" name="url" value="{1}"/></div>
<div><input type="submit" value="Download!"/></div>
</form>
<p>Tip! Use this bookmarklet: <a href="javascript:(function(){window.location='{2}?url='+escape(location);})()">YouTube Download</a>
to easily download videos. Right-click the link and add it to bookmarks,
then when you're looking at a YouTube page select that bookmark from your
browser's bookmarks menu to download the video straight away.</p>
</body>
</html>
""".replace("{0}", msg).replace("{1}", url).replace("{2}", script_url))

cookiejar = http.cookiejar.CookieJar()
urlopener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar))
referrer = ""

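# All requests share one cookie jar and send the first URL fetched as the
# Referer on subsequent requests. An optional byte offset is translated into
# a Range header so downloads can be resumed.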
def urlopen(url, offset=None):
    if url.startswith("//"):
        url = "https:" + url
    if not url.startswith("http://") and not url.startswith("https://"):
        url = "https://www.youtube.com" + url

    global referrer
    req = urllib.request.Request(url)
    if not referrer:
        referrer = url
    else:
        req.add_header("Referer", referrer)

    req.add_header("User-Agent", USER_AGENT)

    if offset:
        req.add_header("Range", "bytes=%d-" % offset)

    res = urlopener.open(req)

    content_range = res.getheader("Content-Range")
    if content_range:
        tokens = content_range.split()
        assert tokens[0] == "bytes"
        start = int(tokens[1].split("-")[0])
        assert start == offset
    return res

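# Only accept https URLs on youtube.com / youtu.be (with or without "www.").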
def validate_url(url):
    parsed_url = urllib.parse.urlparse(url)
    scheme_ok = parsed_url.scheme == "https"
    # str.lstrip strips a set of characters, not a prefix, so use a regex to
    # drop an optional leading "www." from the hostname.
    host = re.sub(r"^www\.", "", parsed_url.netloc)
    host_ok = host in ["youtube.com", "youtu.be"]

    if scheme_ok and host_ok:
        return
    else:
        raise NotYouTube()

def parse_url(url, parser):
    f = urlopen(url)
    parser.feed(f.read().decode("utf-8"))
    parser.close()
    f.close()

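# Merge extra parameters into a URL's query string, e.g. (illustrative only):
#   append_to_qs("https://example.com/watch?v=abc", {"x": "1"})
#   -> "https://example.com/watch?v=abc&x=1"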
def append_to_qs(url, params):
    r = list(urllib.parse.urlsplit(url))
    qs = urllib.parse.parse_qs(r[3])
    qs.update(params)
    r[3] = urllib.parse.urlencode(qs, True)
    url = urllib.parse.urlunsplit(r)
    return url

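# Scan the inline <script> blocks collected by the page parser for the
# embedded player configuration ("ytplayer.config = {...};" or
# "ytcfg.set({...});") and parse the JSON object it contains.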
def get_player_config(scripts):
    config_strings = [
        ("ytplayer.config = {", 1, "};", 1),
        ("ytcfg.set({\"", 2, "});", 1),
    ]
    player_config = {}
    for script in scripts:
        for line in script.split("\n"):
            for s1, off1, s2, off2 in config_strings:
                if s1 in line:
                    p1 = line.find(s1) + len(s1) - off1
                    p2 = line.find(s2, p1) + off2
                    if p1 >= 0 and p2 > 0:
                        player_config.update(json.loads(line[p1:p2]))
    return player_config

def extract_js(script):
    PREFIX = "var _yt_player={};(function(g){var window=this;"
    SUFFIX = ";})(_yt_player);\n"
    assert script.startswith(PREFIX)
    assert script.endswith(SUFFIX)

    return script[len(PREFIX):-len(SUFFIX)]

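# The two finders below locate function names in the player JavaScript by
# pattern matching: the signature-decipher function (called with a ".s"
# argument, optionally wrapped in decodeURIComponent) and the function that
# assigns "this.url".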
def find_cipher_func(script):
    FUNC_NAME = R"([a-zA-Z0-9$]+)"
    DECODE_URI_COMPONENT = R"(\(decodeURIComponent)?"
    FUNC_PARAMS = R"(\([a-zA-Z,\.]+\.s\))"
    TERMINATOR = R"[,;\)]"
    PATTERN = FUNC_NAME + DECODE_URI_COMPONENT + FUNC_PARAMS + TERMINATOR

    match = re.search(PATTERN, script)
    func_name = match.groups()[0]
    return func_name

def find_url_func(script):
    FUNC_NAME = R"([a-zA-Z0-9$]+)"
    PATTERN = R"this\.url\s*=\s*" + FUNC_NAME + R"\s*\(\s*this\s*\)"

    match = re.search(PATTERN, script)
    func_name = match.groups()[0]
    return func_name

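# Resolve a "signatureCipher" value (a query string typically carrying "url",
# "sp" and "s" parameters) into a playable URL by running the relevant parts
# of the player JavaScript in a Node.js vm sandbox with a minimal fake
# browser environment.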
def decode_cipher_url(js_url, cipher):
    cipher = urllib.parse.parse_qs(cipher)
    args = [
        cipher["url"][0],
        cipher["sp"][0],
        cipher["s"][0],
    ]

    f = urlopen(js_url)
    script = f.read().decode("utf-8")
    f.close()

    cipher_func_name = find_cipher_func(script)
    url_func_name = find_url_func(script)

    params = {
        "cipher_func_name": cipher_func_name,
        "url_func_name": url_func_name,
        "args": json.dumps(args),
        "code": json.dumps(extract_js(script)),
    }
    p = subprocess.Popen(
        "node",
        shell=True,
        close_fds=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE
    )
    js_decode_script = ("""
const vm = require('vm');

const fakeGlobal = {};
fakeGlobal.window = fakeGlobal;
fakeGlobal.location = {
    hash: '',
    host: 'www.youtube.com',
    hostname: 'www.youtube.com',
    href: 'https://www.youtube.com',
    origin: 'https://www.youtube.com',
    pathname: '/',
    protocol: 'https:'
};
fakeGlobal.history = {
    pushState: function(){}
};
fakeGlobal.document = {
    location: fakeGlobal.location
};
fakeGlobal.navigator = {
    userAgent: ''
};
fakeGlobal.XMLHttpRequest = class XMLHttpRequest {};
fakeGlobal.matchMedia = () => ({matches: () => {}, media: ''});
fakeGlobal.result_url = null;
fakeGlobal.g = function(){}; // this is _yt_player

const code_string = %(code)s + ';';
const exec_string = 'result_url = %(url_func_name)s(%(cipher_func_name)s(...%(args)s));';
vm.runInNewContext(code_string + exec_string, fakeGlobal);

console.log(fakeGlobal.result_url);
""" % params)

    p.stdin.write(js_decode_script.encode("utf-8"))
    p.stdin.close()

    result_url = p.stdout.read().decode("utf-8").strip()
    if p.wait() != 0:
        raise Exception("js failed to execute: %d" % p.returncode)

    return result_url

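# Walk the progressive formats in the player response and pick the
# highest-quality one with a recognised container type, decoding the
# signature cipher where necessary.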
def get_best_video(player_config):
    player_args = player_config["args"]
    player_response = json.loads(player_args["player_response"])
    formats = player_response["streamingData"]["formats"]

    best_url = None
    best_quality = None
    best_extension = None
    for format_data in formats:
        mimetype = format_data["mimeType"].split(";")[0]
        quality = format_data["quality"]

        if quality not in QUALITIES:
            continue
        if mimetype not in MIMETYPES:
            continue

        extension = MIMETYPES[mimetype]
        quality = QUALITIES.get(quality, -1)

        if best_quality is not None and quality < best_quality:
            continue

        if "signatureCipher" in format_data:
            js_url = player_config["PLAYER_JS_URL"]
            video_url = decode_cipher_url(js_url, format_data["signatureCipher"])
        else:
            video_url = format_data["url"]

        best_url = video_url
        best_quality = quality
        best_extension = extension

    return best_url, best_extension

def sanitize_filename(filename):
    return (
        re.sub(r"\s+", " ", filename.strip())
        .replace("\\", "-")
        .replace("/", "-")
        .replace("\0", " ")
    )

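# Combine the pieces above: extract the player config from the parsed page,
# choose the best stream, and build a sanitised filename from the video title.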
def get_video_url(page):
    player_config = get_player_config(page.scripts)
    if not player_config:
        raise VideoUnavailable(page.unavailable_message or "Could not find video URL")

    video_url, extension = get_best_video(player_config)
    if not video_url:
        return None, None

    title = player_config["args"].get("title", None)
    if not title:
        title = json.loads(player_config["args"]["player_response"])["videoDetails"]["title"]
    if not title:
        title = "Unknown title"

    filename = sanitize_filename(title) + "." + extension

    return video_url, filename

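# Minimal HTML parser that collects the body of every <script> element and
# the text of the element with id="unavailable-message", if present.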
class YouTubeVideoPageParser(html.parser.HTMLParser):
    def __init__(self):
        super().__init__()
        self.unavailable_message = None
        self.scripts = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        self._handle_unavailable_message(tag, attrs)
        self._handle_script(tag, attrs)

    def handle_endtag(self, tag):
        self.handle_data = self._ignore_data

    def _ignore_data(self, _):
        pass

    def _handle_unavailable_message(self, tag, attrs):
        if attrs.get("id", None) == "unavailable-message":
            self.handle_data = self._handle_unavailable_message_data

    def _handle_unavailable_message_data(self, data):
        self.unavailable_message = data.strip()

    def _handle_script(self, tag, attrs):
        if tag == "script":
            self.handle_data = self._handle_script_data

    def _handle_script_data(self, data):
        if data:
            self.scripts.append(data)

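# CGI response: send the video as an attachment (RFC 5987 filename* header),
# pass through the upstream Content-Length, and stream the body to stdout.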
def write_video(filename, video_data):
    quoted_filename = urllib.parse.quote(filename.encode("utf-8"))
    sys.stdout.buffer.write(
        b"Content-Disposition: attachment; filename*=UTF-8''{0}\r\n"
        .replace(b"{0}", quoted_filename.encode("utf-8"))
    )
    sys.stdout.buffer.write(
        b"Content-Length: {0}\r\n"
        .replace(b"{0}", video_data.getheader("Content-Length").encode("utf-8"))
    )
    sys.stdout.buffer.write(b"\r\n")
    shutil.copyfileobj(video_data, sys.stdout.buffer)
    video_data.close()

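# CGI entry point: show the form when no "url" parameter is given, otherwise
# validate and fetch the page, then stream the video back to the client.
# Errors are reported by re-rendering the form with a message.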
def cgimain():
    args = cgi.parse()
    try:
        url = args["url"][0]
    except (KeyError, IndexError):
        print_form(url="https://www.youtube.com/watch?v=FOOBAR")
        return

    try:
        page = YouTubeVideoPageParser()
        validate_url(url)
        parse_url(url, page)
        video_url, filename = get_video_url(page)
        video_data = urlopen(video_url)
    except VideoUnavailable as e:
        print_form(
            url=url,
            msg="<p class='error'>Sorry, there was an error: %s</p>" % html.escape(e.args[0])
        )
        return
    except NotYouTube:
        print_form(
            url=url,
            msg="<p class='error'>Sorry, that does not look like a YouTube page!</p>"
        )
        return
    except Exception:
        print_form(
            url=url,
            msg="<p class='error'>Sorry, there was an unknown error.</p>"
        )
        return

    write_video(filename, video_data)

def pp_size(size):
    suffixes = ["", "KiB", "MiB", "GiB"]
    for i, suffix in enumerate(suffixes):
        if size < 1024:
            break
        size /= 1024
    return "%.2f %s" % (size, suffix)

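# Copy infile to outfile in 32 KiB chunks, redrawing a one-line progress
# indicator (bytes copied, total, rate) at most twice per second.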
def copy_with_progress(content_length, infile, outfile):
    def print_status():
        rate = 0
        if now != last_ts:
            rate = last_bytes_read / (now - last_ts)
        sys.stdout.write("\33[2K\r")
        sys.stdout.write("%s / %s (%s/sec)" % (
            pp_size(bytes_read),
            pp_size(content_length),
            pp_size(rate),
        ))
        sys.stdout.flush()

    last_ts = 0
    last_bytes_read = 0
    bytes_read = 0
    while True:
        now = time.time()
        if now - last_ts > 0.5:
            print_status()
            last_ts = now
            last_bytes_read = 0

        buf = infile.read(32768)
        if not buf:
            break
        outfile.write(buf)
        last_bytes_read += len(buf)
        bytes_read += len(buf)

    # Newline at the end
    print_status()
    print()

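# Command-line entry point, e.g.:
#   ./youtube.cgi 'https://www.youtube.com/watch?v=FOOBAR'
# Appends to an existing partial file and uses Range requests to resume;
# an HTTP 416 response is taken to mean the file is already complete.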
def main():
    try:
        url = sys.argv[1]
    except IndexError:
        print("Usage: %s https://youtube.com/watch?v=FOOBAR" % sys.argv[0], file=sys.stderr)
        sys.exit(1)

    page = YouTubeVideoPageParser()
    parse_url(url, page)
    video_url, filename = get_video_url(page)
    print("Downloading", filename)

    outfile = open(filename, "ab")
    offset = outfile.tell()
    if offset > 0:
        print("Resuming download from", pp_size(offset))
    total_size = None

    while True:
        try:
            video_data = urlopen(video_url, offset)
        except urllib.error.HTTPError as e:
            if e.code == 416:
                print("File is complete!")
                break
            else:
                raise

        content_length = int(video_data.getheader("Content-Length"))
        if total_size is None:
            # The first response reports the bytes remaining from the current
            # offset, so the full size is that plus what is already on disk.
            total_size = offset + content_length

        try:
            copy_with_progress(content_length, video_data, outfile)
        except IOError:
            print()

        video_data.close()
        if outfile.tell() != total_size:
            old_offset = offset
            offset = outfile.tell()
            if old_offset == offset:
                time.sleep(1)
            print("Restarting download from", pp_size(offset))
        else:
            break

    outfile.close()


if __name__ == "__main__":
    if "SCRIPT_NAME" in os.environ:
        cgimain()
    else:
        try:
            main()
        except KeyboardInterrupt:
            print("\nExiting...")
            sys.exit(1)