# bandcamp.py
  1. from __future__ import unicode_literals
  2. import json
  3. import random
  4. import re
  5. import time
  6. from .common import InfoExtractor
  7. from ..compat import (
  8. compat_str,
  9. compat_urlparse,
  10. )
  11. from ..utils import (
  12. ExtractorError,
  13. float_or_none,
  14. int_or_none,
  15. KNOWN_EXTENSIONS,
  16. parse_filesize,
  17. unescapeHTML,
  18. update_url_query,
  19. unified_strdate,
  20. url_or_none,
  21. )
  22. class BandcampIE(InfoExtractor):
  23. _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
  24. _TESTS = [{
  25. 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
  26. 'md5': 'c557841d5e50261777a6585648adf439',
  27. 'info_dict': {
  28. 'id': '1812978515',
  29. 'ext': 'mp3',
  30. 'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
  31. 'duration': 9.8485,
  32. },
  33. '_skip': 'There is a limit of 200 free downloads / month for the test song'
  34. }, {
  35. 'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
  36. 'md5': '0369ace6b939f0927e62c67a1a8d9fa7',
  37. 'info_dict': {
  38. 'id': '2650410135',
  39. 'ext': 'aiff',
  40. 'title': 'Ben Prunty - Lanius (Battle)',
  41. 'uploader': 'Ben Prunty',
  42. },
  43. }, {
  44. 'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire',
  45. 'info_dict': {
  46. 'id': '2584466013',
  47. 'ext': 'mp3',
  48. 'title': 'Hail to Fire',
  49. 'track_number': 5,
  50. },
  51. 'params': {
  52. 'skip_download': True,
  53. },
  54. }]
  55. def _real_extract(self, url):
  56. mobj = re.match(self._VALID_URL, url)
  57. title = mobj.group('title')
  58. webpage = self._download_webpage(url, title)
  59. thumbnail = self._html_search_meta('og:image', webpage, default=None)
  60. m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
  61. if not m_download:
  62. m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
  63. if m_trackinfo:
  64. json_code = m_trackinfo.group(1)
  65. data = json.loads(json_code)[0]
  66. track_id = compat_str(data['id'])
  67. if not data.get('file'):
  68. raise ExtractorError('Not streamable', video_id=track_id, expected=True)
  69. formats = []
  70. for format_id, format_url in data['file'].items():
  71. ext, abr_str = format_id.split('-', 1)
  72. formats.append({
  73. 'format_id': format_id,
  74. 'url': self._proto_relative_url(format_url, 'http:'),
  75. 'ext': ext,
  76. 'vcodec': 'none',
  77. 'acodec': ext,
  78. 'abr': int_or_none(abr_str),
  79. })
  80. self._sort_formats(formats)
  81. return {
  82. 'id': track_id,
  83. 'title': data['title'],
  84. 'thumbnail': thumbnail,
  85. 'formats': formats,
  86. 'duration': float_or_none(data.get('duration')),
  87. 'track_number': int_or_none(data.get('track_num')),
  88. }
  89. else:
  90. raise ExtractorError('No free songs found')
  91. download_link = m_download.group(1)
  92. video_id = self._search_regex(
  93. r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
  94. webpage, 'video id')
  95. download_webpage = self._download_webpage(
  96. download_link, video_id, 'Downloading free downloads page')
  97. blob = self._parse_json(
  98. self._search_regex(
  99. r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
  100. 'blob', group='blob'),
  101. video_id, transform_source=unescapeHTML)
  102. info = blob['digital_items'][0]
  103. downloads = info['downloads']
  104. track = info['title']
  105. artist = info.get('artist')
  106. title = '%s - %s' % (artist, track) if artist else track
  107. download_formats = {}
  108. for f in blob['download_formats']:
  109. name, ext = f.get('name'), f.get('file_extension')
  110. if all(isinstance(x, compat_str) for x in (name, ext)):
  111. download_formats[name] = ext.strip('.')
  112. formats = []
  113. for format_id, f in downloads.items():
  114. format_url = f.get('url')
  115. if not format_url:
  116. continue
  117. # Stat URL generation algorithm is reverse engineered from
  118. # download_*_bundle_*.js
  119. stat_url = update_url_query(
  120. format_url.replace('/download/', '/statdownload/'), {
  121. '.rand': int(time.time() * 1000 * random.random()),
  122. })
  123. format_id = f.get('encoding_name') or format_id
  124. stat = self._download_json(
  125. stat_url, video_id, 'Downloading %s JSON' % format_id,
  126. transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
  127. fatal=False)
  128. if not stat:
  129. continue
  130. retry_url = url_or_none(stat.get('retry_url'))
  131. if not retry_url:
  132. continue
  133. formats.append({
  134. 'url': self._proto_relative_url(retry_url, 'http:'),
  135. 'ext': download_formats.get(format_id),
  136. 'format_id': format_id,
  137. 'format_note': f.get('description'),
  138. 'filesize': parse_filesize(f.get('size_mb')),
  139. 'vcodec': 'none',
  140. })
  141. self._sort_formats(formats)
  142. return {
  143. 'id': video_id,
  144. 'title': title,
  145. 'thumbnail': info.get('thumb_url') or thumbnail,
  146. 'uploader': info.get('artist'),
  147. 'artist': artist,
  148. 'track': track,
  149. 'formats': formats,
  150. }
  151. class BandcampAlbumIE(InfoExtractor):
  152. IE_NAME = 'Bandcamp:album'
  153. _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'
  154. _TESTS = [{
  155. 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
  156. 'playlist': [
  157. {
  158. 'md5': '39bc1eded3476e927c724321ddf116cf',
  159. 'info_dict': {
  160. 'id': '1353101989',
  161. 'ext': 'mp3',
  162. 'title': 'Intro',
  163. }
  164. },
  165. {
  166. 'md5': '1a2c32e2691474643e912cc6cd4bffaa',
  167. 'info_dict': {
  168. 'id': '38097443',
  169. 'ext': 'mp3',
  170. 'title': 'Kero One - Keep It Alive (Blazo remix)',
  171. }
  172. },
  173. ],
  174. 'info_dict': {
  175. 'title': 'Jazz Format Mixtape vol.1',
  176. 'id': 'jazz-format-mixtape-vol-1',
  177. 'uploader_id': 'blazo',
  178. },
  179. 'params': {
  180. 'playlistend': 2
  181. },
  182. 'skip': 'Bandcamp imposes download limits.'
  183. }, {
  184. 'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
  185. 'info_dict': {
  186. 'title': 'Hierophany of the Open Grave',
  187. 'uploader_id': 'nightbringer',
  188. 'id': 'hierophany-of-the-open-grave',
  189. },
  190. 'playlist_mincount': 9,
  191. }, {
  192. 'url': 'http://dotscale.bandcamp.com',
  193. 'info_dict': {
  194. 'title': 'Loom',
  195. 'id': 'dotscale',
  196. 'uploader_id': 'dotscale',
  197. },
  198. 'playlist_mincount': 7,
  199. }, {
  200. # with escaped quote in title
  201. 'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
  202. 'info_dict': {
  203. 'title': '"Entropy" EP',
  204. 'uploader_id': 'jstrecords',
  205. 'id': 'entropy-ep',
  206. },
  207. 'playlist_mincount': 3,
  208. }, {
  209. # not all tracks have songs
  210. 'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
  211. 'info_dict': {
  212. 'id': 'we-are-the-plague',
  213. 'title': 'WE ARE THE PLAGUE',
  214. 'uploader_id': 'insulters',
  215. },
  216. 'playlist_count': 2,
  217. }]
  218. @classmethod
  219. def suitable(cls, url):
  220. return (False
  221. if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
  222. else super(BandcampAlbumIE, cls).suitable(url))
  223. def _real_extract(self, url):
  224. mobj = re.match(self._VALID_URL, url)
  225. uploader_id = mobj.group('subdomain')
  226. album_id = mobj.group('album_id')
  227. playlist_id = album_id or uploader_id
  228. webpage = self._download_webpage(url, playlist_id)
  229. track_elements = re.findall(
  230. r'(?s)<div[^>]*>(.*?<a[^>]+href="([^"]+?)"[^>]+itemprop="url"[^>]*>.*?)</div>', webpage)
  231. if not track_elements:
  232. raise ExtractorError('The page doesn\'t contain any tracks')
  233. # Only tracks with duration info have songs
  234. entries = [
  235. self.url_result(
  236. compat_urlparse.urljoin(url, t_path),
  237. ie=BandcampIE.ie_key(),
  238. video_title=self._search_regex(
  239. r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
  240. elem_content, 'track title', fatal=False))
  241. for elem_content, t_path in track_elements
  242. if self._html_search_meta('duration', elem_content, default=None)]
  243. title = self._html_search_regex(
  244. r'album_title\s*:\s*"((?:\\.|[^"\\])+?)"',
  245. webpage, 'title', fatal=False)
  246. if title:
  247. title = title.replace(r'\"', '"')
  248. return {
  249. '_type': 'playlist',
  250. 'uploader_id': uploader_id,
  251. 'id': playlist_id,
  252. 'title': title,
  253. 'entries': entries,
  254. }
  255. class BandcampWeeklyIE(InfoExtractor):
  256. IE_NAME = 'Bandcamp:weekly'
  257. _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
  258. _TESTS = [{
  259. 'url': 'https://bandcamp.com/?show=224',
  260. 'md5': 'b00df799c733cf7e0c567ed187dea0fd',
  261. 'info_dict': {
  262. 'id': '224',
  263. 'ext': 'opus',
  264. 'title': 'BC Weekly April 4th 2017 - Magic Moments',
  265. 'description': 'md5:5d48150916e8e02d030623a48512c874',
  266. 'duration': 5829.77,
  267. 'release_date': '20170404',
  268. 'series': 'Bandcamp Weekly',
  269. 'episode': 'Magic Moments',
  270. 'episode_number': 208,
  271. 'episode_id': '224',
  272. }
  273. }, {
  274. 'url': 'https://bandcamp.com/?blah/blah@&show=228',
  275. 'only_matching': True
  276. }]
  277. def _real_extract(self, url):
  278. video_id = self._match_id(url)
  279. webpage = self._download_webpage(url, video_id)
  280. blob = self._parse_json(
  281. self._search_regex(
  282. r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
  283. 'blob', group='blob'),
  284. video_id, transform_source=unescapeHTML)
  285. show = blob['bcw_show']
  286. # This is desired because any invalid show id redirects to `bandcamp.com`
  287. # which happens to expose the latest Bandcamp Weekly episode.
  288. show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)
  289. formats = []
  290. for format_id, format_url in show['audio_stream'].items():
  291. if not url_or_none(format_url):
  292. continue
  293. for known_ext in KNOWN_EXTENSIONS:
  294. if known_ext in format_id:
  295. ext = known_ext
  296. break
  297. else:
  298. ext = None
  299. formats.append({
  300. 'format_id': format_id,
  301. 'url': format_url,
  302. 'ext': ext,
  303. 'vcodec': 'none',
  304. })
  305. self._sort_formats(formats)
  306. title = show.get('audio_title') or 'Bandcamp Weekly'
  307. subtitle = show.get('subtitle')
  308. if subtitle:
  309. title += ' - %s' % subtitle
  310. episode_number = None
  311. seq = blob.get('bcw_seq')
  312. if seq and isinstance(seq, list):
  313. try:
  314. episode_number = next(
  315. int_or_none(e.get('episode_number'))
  316. for e in seq
  317. if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
  318. except StopIteration:
  319. pass
  320. return {
  321. 'id': video_id,
  322. 'title': title,
  323. 'description': show.get('desc') or show.get('short_desc'),
  324. 'duration': float_or_none(show.get('audio_duration')),
  325. 'is_live': False,
  326. 'release_date': unified_strdate(show.get('published_date')),
  327. 'series': 'Bandcamp Weekly',
  328. 'episode': show.get('subtitle'),
  329. 'episode_number': episode_number,
  330. 'episode_id': compat_str(video_id),
  331. 'formats': formats
  332. }