[nerdcubed] Add new extractor
nerdcubed.co.uk describes its videos in a single feed.json file, providing references to and metadata on more than 1300 YouTube videos spread across three main channels as well as guest appearances on other channels, all via a single HTTP request. NerdCubedFeedIE transforms this feed into a youtube-dl playlist, preserving the information present in the upstream JSON (allowing zero-cost title/date matches) and ultimately referencing the embedded YouTube videos.
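For context, a minimal sketch of how a single feed entry (as the extractor below assumes it to be shaped) is turned into a youtube-dl playlist entry. The sample values are hypothetical; the field names (title, date, youtube_id, source) are the ones _real_extract reads:

# Hypothetical feed.json entry; values are made up, keys mirror what the extractor uses.
import datetime

feed_entry = {
    'title': 'Example Video Title',           # hypothetical title
    'date': '2014-12-18',                     # ISO date as served by the feed
    'youtube_id': 'dQw4w9WgXcQ',              # hypothetical YouTube video ID
    'source': {'name': 'OfficialNerdCubed'},  # may be falsy for guest appearances
}

playlist_entry = {
    '_type': 'url',
    'title': feed_entry['title'],
    'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
    # youtube-dl expects upload_date as YYYYMMDD, so the ISO date is re-formatted
    'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
    'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
}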
@@ -0,0 +1,36 @@
# coding: utf-8
from __future__ import unicode_literals

import datetime

from .common import InfoExtractor


class NerdCubedFeedIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/feed\.json'
    _TEST = {
        'url': 'http://www.nerdcubed.co.uk/feed.json',
        'info_dict': {
            'title': 'nerdcubed.co.uk feed',
        },
        'playlist_mincount': 1300,
    }

    def _real_extract(self, url):
        feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")

        entries = [{
            '_type': 'url',
            'title': feed_entry['title'],
            'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
            'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
            'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
        } for feed_entry in feed]

        return {
            '_type': 'playlist',
            'title': 'nerdcubed.co.uk feed',
            'id': 'nerdcubed-feed',
            'entries': entries,
        }
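Once the new IE is registered in youtube-dl's extractor list, the feed can be exercised directly with youtube-dl http://www.nerdcubed.co.uk/feed.json, and the bundled _TEST can be run in the usual way (e.g. python test/test_download.py TestDownload.test_NerdCubedFeed, assuming the standard test naming derived from the IE name).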