[totem] Updated iPlayer library with new BBC channel and category list



commit c23ad3443959235ae8ab0bfd8ff9be4901f9a6ec
Author: Philip Withnall <philip tecnocode co uk>
Date:   Sun Apr 26 11:10:21 2009 +0100

    Updated iPlayer library with new BBC channel and category list
    
    Moved some regional channels to categories to stay in line with the BBC
    website. Also added a missing "listparser" helper file.
---
 src/plugins/iplayer/Makefile.am   |    4 +-
 src/plugins/iplayer/iplayer2.py   |   12 ++++----
 src/plugins/iplayer/listparser.py |   52 +++++++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/src/plugins/iplayer/Makefile.am b/src/plugins/iplayer/Makefile.am
index 63cd311..bb71cdd 100644
--- a/src/plugins/iplayer/Makefile.am
+++ b/src/plugins/iplayer/Makefile.am
@@ -1,6 +1,6 @@
 plugindir = $(PLUGINDIR)/iplayer
 uidir = $(plugindir)
-plugin_PYTHON = iplayer.py
+plugin_PYTHON = iplayer.py iplayer2.py listparser.py
 plugin_in_files = iplayer.totem-plugin.in
 
 %.totem-plugin: %.totem-plugin.in $(INTLTOOL_MERGE) $(wildcard $(top_srcdir)/po/*po) ; $(INTLTOOL_MERGE) $(top_srcdir)/po $< $@ -d -u -c $(top_builddir)/po/.intltool-merge-cache
@@ -8,7 +8,7 @@ plugin_in_files = iplayer.totem-plugin.in
 plugin_DATA = $(plugin_in_files:.totem-plugin.in=.totem-plugin)
 ui_DATA = iplayer.ui
 
-EXTRA_DIST = $(plugin_in_files) $(ui_DATA) iplayer.py
+EXTRA_DIST = $(plugin_in_files) $(ui_DATA) $(plugin_PYTHON)
 
 CLEANFILES = $(plugin_DATA)
 DISTCLEANFILES = $(plugin_DATA)
diff --git a/src/plugins/iplayer/iplayer2.py b/src/plugins/iplayer/iplayer2.py
index 14c5f78..610c4d2 100644
--- a/src/plugins/iplayer/iplayer2.py
+++ b/src/plugins/iplayer/iplayer2.py
@@ -69,9 +69,7 @@ channels_tv_list = [
     ('cbeebies', 'CBeebies'),
     ('bbc_news24', 'BBC News Channel'),
     ('bbc_parliament', 'BBC Parliament'),
-    ('bbc_one_northern_ireland', 'BBC One Northern Ireland'), 
-    ('bbc_one_scotland', 'BBC One Scotland'),
-    ('bbc_one_wales', 'BBC One Wales'),
+    ('bbc_hd', 'BBC HD'),
     ('bbc_alba', 'BBC Alba'),
 ]
 channels_tv = dict(channels_tv_list)
@@ -192,13 +190,15 @@ categories_list = [
     ('drama', 'Drama'),
     ('entertainment', 'Entertainment'),
     ('factual', 'Factual'),
+    ('films', 'Films'),
     ('music', 'Music'),
     ('news', 'News'),
     ('religion_and_ethics', 'Religion & Ethics'),
     ('sport', 'Sport'),
-    ('olympics', 'Olympics'),
-    ('wales', 'Wales'),
-    ('signed', 'Sign Zone')
+    ('signed', 'Sign Zone'),
+    ('northern_ireland', 'Northern Ireland'),
+    ('scotland', 'Scotland'),
+    ('wales', 'Wales')
 ]
 categories = dict(categories_list)
 
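For reference, each channel and category table above is a plain list of
(feed id, display label) pairs that is then folded into a dictionary, so
the plugin can turn an identifier coming from a feed into a human-readable
name. A minimal sketch of that lookup, using only entries visible in the
hunk above (the channel_label() helper is illustrative, not part of the
plugin):

    channels_tv = dict([
        ('bbc_hd', 'BBC HD'),
        ('bbc_alba', 'BBC Alba'),
    ])

    def channel_label(channel_id):
        # Fall back to the raw identifier for channels we do not know about.
        return channels_tv.get(channel_id, channel_id)

    print channel_label('bbc_hd')        # -> BBC HD
    print channel_label('not_a_channel') # -> not_a_channel
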
diff --git a/src/plugins/iplayer/listparser.py b/src/plugins/iplayer/listparser.py
new file mode 100644
index 0000000..526224a
--- /dev/null
+++ b/src/plugins/iplayer/listparser.py
@@ -0,0 +1,52 @@
+#
+# Provides a simple and very quick way to parse list feeds
+#
+
+import re
+
+def xmlunescape(data):
+    data = data.replace('&amp;', '&')
+    data = data.replace('&gt;', '>')
+    data = data.replace('&lt;', '<')
+    return data
+
+class listentry(object):
+     def __init__(self, title=None, id=None, updated=None, summary=None, categories=None):
+         self.title      = title
+         self.id         = id
+         self.updated    = updated
+         self.summary    = summary
+         self.categories = categories
+
+class listentries(object):
+     def __init__(self):
+         self.entries = []
+                  
+def parse(xmlSource):  
+    try:
+        encoding = re.findall( "<\?xml version=\"[^\"]*\" encoding=\"([^\"]*)\"\?>", xmlSource )[ 0 ]
+    except: return None
+    elist=listentries()
+    # gather all list entries 
+    entriesSrc = re.findall( "<entry>(.*?)</entry>", xmlSource, re.DOTALL)
+    datematch = re.compile(':\s+([0-9]+)/([0-9]+)/([0-9]{4})')
+    
+    # enumerate thru the element list and gather info
+    for entrySrc in entriesSrc:
+        entry={}
+        title   = re.findall( "<title[^>]*>(.*?)</title>", entrySrc, re.DOTALL )[0]
+        id      = re.findall( "<id[^>]*>(.*?)</id>", entrySrc, re.DOTALL )[0]
+        updated = re.findall( "<updated[^>]*>(.*?)</updated>", entrySrc, re.DOTALL )[0]
+        summary = re.findall( "<content[^>]*>(.*?)</content>", entrySrc, re.DOTALL )[0].splitlines()[-3]
+        categories = re.findall( "<category[^>]*term=\"(.*?)\"[^>]*>", entrySrc, re.DOTALL )
+
+        match = datematch.search(title)
+        if match:
+            # if the title contains a date at the end, use that as the updated date (YYYY-MM-DD)
+            updated = "%s-%s-%s" % ( match.group(3), match.group(2), match.group(1)  )
+                    
+        e_categories=[]
+        for c in categories: e_categories.append(xmlunescape(c))        
+        elist.entries.append(listentry(xmlunescape(title), xmlunescape(id), xmlunescape(updated), xmlunescape(summary), e_categories))
+
+    return elist   
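
For anyone reusing the new helper: listparser.parse() takes the raw XML of
a BBC list feed and returns either None (when it cannot find an XML
declaration with an encoding attribute) or a listentries object whose
.entries list holds listentry objects carrying title, id, updated, summary
and categories. A rough usage sketch (the print_feed() wrapper is
illustrative, not part of the plugin):

    import listparser

    def print_feed(xml):
        feed = listparser.parse(xml)
        if feed is None:
            print 'not a parsable list feed'
            return
        for entry in feed.entries:
            # updated is either the feed's <updated> value or, when the
            # title ends in a DD/MM/YYYY date, that date as YYYY-MM-DD.
            print entry.updated, entry.title
            print '  categories:', ', '.join(entry.categories)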


