print "No match found for pattern:", "/".join(pattern)
return
p = pattern[count]
- for child in node.children:
+ for child in node.get_children():
if fnmatch.fnmatch(child.title, p):
match(download_list, child, pattern, count+1)
self.children = []
self.can_download = False
+ def get_children(self):
+ return self.children
+
def download(self):
raise NotImplemented
def load_root_node():
root_node = Node("Root")
- print "Loading iView episode data...",
- sys.stdout.flush()
import iview
- iview_node = Node("ABC iView", root_node)
- iview.fill_nodes(iview_node)
- print "done"
+ iview.fill_nodes(root_node)
- print "Loading SBS episode data...",
- sys.stdout.flush()
import sbs
- sbs_node = Node("SBS", root_node)
- sbs.fill_nodes(sbs_node)
- print "done"
+ sbs.fill_nodes(root_node)
return root_node
def urlopen(url, max_age):
+### print url
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
"-r", vbase,
"-y", vpath,
]
- print cmd
if hash_url is not None:
cmd += ["--swfVfy", hash_url]
try:
def download_urllib(filename, url):
filename = sanify_filename(filename)
- print "Downloading: %s -> %s" % (url, filename)
+ print "Downloading: %s" % filename
try:
src = urllib.urlopen(url)
dst = open(filename, "w")
- shutil.copyfileobj(src, dst)
+ while True:
+ buf = src.read(1024*1024)
+ if not buf:
+ break
+ dst.write(buf)
+ sys.stdout.write(".")
+ sys.stdout.flush()
return True
except KeyboardInterrupt:
print "\nCancelled", url
finally:
- src.close()
- dst.close()
+ try:
+ src.close()
+ except:
+ pass
+ try:
+ dst.close()
+ except:
+ pass
return False
while True:
options = {}
will_download = True
- for n in node.children:
+ for n in node.get_children():
options[n.title] = n
if not n.can_download:
will_download = False
Node.__init__(self, title, parent)
self.vpath = vpath
self.can_download = True
-
+
def download(self):
auth_doc = grab_xml(PARAMS["auth"], 0)
vbase = auth_doc.xpath("//auth:server/text()", namespaces=NS)[0]
vpath = ext + ":" + vpath
filename = self.title + "." + ext
return download_rtmp(filename, vbase, vpath, HASH_URL)
-
+
+
+class IviewSeries(Node):
+ def __init__(self, series_title, series_id, parent):
+ Node.__init__(self, series_title, parent)
+ self.series_title = series_title
+ self.series_id = series_id
+
+ def get_children(self):
+ if self.children:
+ return self.children
+ print "DOWNLOADING SERIES"
+ series_doc = grab_json(PARAMS["api"] + "series=" + self.series_id, 3600)[0]
+ for episode in series_doc["f"]:
+ vpath = episode["n"]
+ episode_title = episode["b"].strip()
+ if self.series_title != episode_title:
+ episode_title = self.series_title + " " + episode_title
+ IviewNode(episode_title, self, vpath)
+ return self.children
+
+
def fill_nodes(root_node):
+ root_node = Node("ABC iView", root_node)
+
config_doc = grab_xml(CONFIG_URL, 24*3600)
global PARAMS
PARAMS = dict((p.attrib["name"], p.attrib["value"]) for p in config_doc.xpath("/config/param"))
# Create a duplicate of each series within each category that it appears
series_list_doc = grab_json(PARAMS["api"] + "seriesIndex", 3600)
- now = datetime.now()
for series in series_list_doc:
categories = series["e"].split()
sid = series["a"]
- max_age = None
- for episode in series["f"]:
- air_date = datetime.strptime(episode["f"], "%Y-%m-%d %H:%M:%S")
- diff = now - air_date
- diff = 24*3600*diff.days + diff.seconds
- if max_age is None or diff < max_age:
- max_age = diff
-
- if max_age is None:
- continue
	series_title = series["b"].replace("&amp;", "&")
- series_nodes = []
for cid in categories:
category_node = categories_map.get(cid, None)
if category_node:
- series_nodes.append(Node(series_title, category_node))
- if not series_nodes:
- continue
-
- series_doc = grab_json(PARAMS["api"] + "series=" + sid, max_age)[0]
- for episode in series_doc["f"]:
- vpath = episode["n"]
- episode_title = episode["b"].strip()
- if series_title != episode_title:
- episode_title = series_title + " " + episode_title
- for series_node in series_nodes:
- IviewNode(episode_title, series_node, vpath)
-
+ IviewSeries(series_title, sid, category_node)
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:noet
-from common import grab_json, grab_xml, download_rtmp, Node
+from common import grab_json, grab_xml, download_rtmp, download_urllib, Node
import collections
best_url = d["plfile$url"]
doc = grab_xml(best_url, 3600)
- vbase = doc.xpath("//smil:meta/@base", namespaces=NS)[0]
- vpath = doc.xpath("//smil:video/@src", namespaces=NS)[0]
- ext = vpath.rsplit(".", 1)[1]
- filename = self.title + "." + ext
-
- return download_rtmp(filename, vbase, vpath)
+ if doc.xpath("//smil:meta/@base", namespaces=NS):
+ vbase = doc.xpath("//smil:meta/@base", namespaces=NS)[0]
+ vpath = doc.xpath("//smil:video/@src", namespaces=NS)[0]
+ ext = vpath.rsplit(".", 1)[1]
+ filename = self.title + "." + ext
+ return download_rtmp(filename, vbase, vpath)
+ else:
+ from lxml import etree
+ url = doc.xpath("//smil:video/@src", namespaces=NS)[0]
+ ext = url.rsplit(".", 1)[1]
+ filename = self.title + "." + ext
+ url += "?v=2.5.14&fp=MAC%2011,1,102,55&r=FLQDD&g=YNANAXRIYFYO"
+ return download_urllib(filename, url)
def fill_entry(get_catnode, entry):
title = entry["title"]
fill_entry(get_catnode, entry)
index += doc["itemsPerPage"]
-def fill_nodes(root_node):
- catnodes = {}
- def get_catnode(name):
+class SbsRoot(Node):
+ def __init__(self, title, parent=None):
+ Node.__init__(self, title, parent)
+ self.catnodes = {}
+
+ def get_catnode(self, name):
try:
- return catnodes[name]
+ return self.catnodes[name]
except KeyError:
- n = Node(name, root_node)
- catnodes[name] = n
+ n = Node(name, self)
+ self.catnodes[name] = n
return n
- for section in SECTIONS:
- fill_section(get_catnode, section)
+ def get_children(self):
+ if self.children:
+ return self.children
+ for section in SECTIONS:
+ fill_section(self.get_catnode, section)
+ return self.children
+def fill_nodes(root_node):
+ SbsRoot("SBS", root_node)