# -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2019 Tobias Gruetzmacher

from __future__ import absolute_import, division, print_function

from re import compile

from six.moves.urllib.parse import urljoin

from ..helpers import bounceStarter, xpath_class
from ..scraper import _BasicScraper, _ParserScraper
from ..util import tagre
from .common import _WordPressScraper


class RadioactivePanda(_BasicScraper):
    url = 'http://www.radioactivepanda.com/'
    stripUrl = url + 'comic/%s'
    imageSearch = compile(r'<img src="(/Assets/.*?)".+?"comicimg"')
    prevSearch = compile(r'<a href="(/comic/.*?)".+?previous_btn')
    help = 'Index format: n (no padding)'


class RalfTheDestroyer(_WordPressScraper):
    url = 'http://ralfthedestroyer.com/'


class RaynaOnTheRiver(_WordPressScraper):
    url = 'http://www.catomix.com/rayna/'
    firstStripUrl = url + 'archives/comic/teaser-poster'


class RealLife(_WordPressScraper):
    url = 'https://reallifecomics.com/'
    stripUrl = url + 'comic.php?comic=%s'
    firstStripUrl = stripUrl % 'title-1'
    help = 'Index format: monthname-dd-yyyy'

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
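        # One strip is stored under a wrong date and a few carry stray "5" prefixes;
        # map them back to date-based names.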
        if pageUrl.rsplit('=', 1)[-1] == 'may-27-2014':
            filename = filename.replace('20140219_3121', '20140527')
        filename = filename.replace('5-Finished', '20140623_3161')
        filename = filename.replace('520140722', '20140722')
        filename = filename.replace('520140724', '20140724')
        return filename

    def getPrevUrl(self, url, data):
        # "Parse" JavaScript
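        # The target URL is embedded as the first quoted string in the link's onclick attribute.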
        prevtag = data.find_class('comic-nav-previous')
        if not prevtag:
            return None
        target = prevtag[0].get('onclick').split("'")[1]
        return urljoin(url, target)


class RealmOfAtland(_BasicScraper):
    url = 'http://www.realmofatland.com/'
    stripUrl = url + '?p=%s'
    firstStripUrl = stripUrl % '1'
    prevSearch = compile(tagre("a", "href", r'(\?p=\d+)', after="cg_back"))
    imageSearch = compile(tagre("img", "src", r'(images/strips/atland\d+.[^"]+)'))
    help = 'Index format: nnn'


class RedMeat(_ParserScraper):
    url = 'http://www.redmeat.com/max-cannon/FreshMeat'
    imageSearch = '//div[@class="comicStrip"]//img'
    prevSearch = '//a[@class="prev"]'

    def namer(self, image_url, page_url):
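        # Combine the image's parent directory name and file name.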
        parts = image_url.rsplit('/', 2)
        return '_'.join(parts[1:3])


class RedString(_BasicScraper):
    url = 'http://www.redstring.strawberrycomics.com/'
    stripUrl = url + 'index.php?id=%s'
    firstStripUrl = stripUrl % '434'
    imageSearch = compile(tagre("img", "src", r'(comics/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(/index\.php\?id=\d+)', after="prev"))
    help = 'Index format: nnn'


class Replay(_ParserScraper):
    url = 'http://replaycomic.com/'
    stripUrl = url + 'comic/%s/'
    url = stripUrl % 'trying-it-out'
    firstStripUrl = stripUrl % 'red-desert'
    imageSearch = '//div[@id="comic"]//img'
    prevSearch = '//a[contains(@class, "comic-nav-previous")]'
    nextSearch = '//a[contains(@class, "comic-nav-next")]'

    def starter(self):
        # Retrieve archive page to identify chapters
        archivePage = self.getPage(self.url + 'archive')
        archive = archivePage.xpath('//div[@class="comic-archive-chapter-wrap"]')
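        # Remember where each chapter starts; namer() counts self.chapter down
        # as it walks back past these pages.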
        self.chapter = len(archive) - 1
        self.startOfChapter = []
        for archiveChapter in archive:
            self.startOfChapter.append(archiveChapter.xpath('.//a')[0].get('href'))
        return bounceStarter(self)

    def namer(self, imageUrl, pageUrl):
        # Name pages based on chapter, index, and post title
        name = pageUrl.rstrip('/').rsplit('/', 1)[-1]
        page = imageUrl.rsplit('/', 1)[-1].rsplit('.', 1)

        # Fix inconsistent page number formatting
        if page[0].isdigit() and len(page[0]) > 2 and self.chapter == 1 and name != 'through-the-woods':
            page[0] = page[0][:2] + '-' + page[0][2:]
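
        # e.g. chapter 3, image "0412.png", post "example-post" (made-up values) -> "3-0412-example-post.png"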
        name = '%d-%s-%s.%s' % (self.chapter, page[0], name, page[1])
        if pageUrl in self.startOfChapter:
            self.chapter -= 1
        return name


class RiversideExtras(_ParserScraper):
    url = 'https://riversidecomics.com/'
    imageSearch = '//div[{}]//img'.format(xpath_class('webcomic-image'))
    prevSearch = '//a[{}]'.format(xpath_class('previous-webcomic-link'))


class RomanticallyApocalyptic(_ParserScraper):
    url = 'http://romanticallyapocalyptic.com/'
    stripUrl = url + '%s'
    firstStripUrl = stripUrl % '0'
    imageSearch = '//div[%s]/center//img' % xpath_class('comicpanel')
    prevSearch = '//a[@accesskey="p"]'
    help = 'Index format: n'
    adult = True


class Roza(_ParserScraper):
    url = 'http://www.junglestudio.com/roza/index.php'
    stripUrl = url + '?date=%s'
    firstStripUrl = stripUrl % '2007-05-01'
    imageSearch = '//img[contains(@src, "pages/")]'
    prevSearch = '//a[img[contains(@src, "navtable_01.gif")]]'
    help = 'Index format: yyyy-mm-dd'


class Ruthe(_BasicScraper):
    url = 'http://ruthe.de/'
    stripUrl = url + 'cartoon/%s/datum/asc/'
    firstStripUrl = stripUrl % '1'
    lang = 'de'
    imageSearch = compile(tagre("img", "src", r'(/?cartoons/strip_\d+[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(/cartoon/\d+/datum/asc/)') +
                         'vorheriger')
    help = 'Index format: number'


class Ryugou(_ParserScraper):
    url = 'http://ryugou.swashbuckledcomics.com/'
    stripUrl = url + 'comic/%s/'
    firstStripUrl = stripUrl % 'ryugou-chapter-1-cover'
    imageSearch = '//div[@class="webcomic-image"]//img'
    prevSearch = '//a[contains(@class, "previous-webcomic-link")]'
    nextSearch = '//a[contains(@class, "next-webcomic-link")]'
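    # bounceStarter bounces back and forth over the prev/next links once to get
    # the canonical URL of the latest strip.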
    starter = bounceStarter

    def namer(self, imageUrl, pageUrl):
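        # Name the image after the page slug, keeping the original file extension.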
        title = pageUrl.rstrip('/').rsplit('/', 1)[-1]
        ext = imageUrl.rsplit('.', 1)[-1]
        return title + '.' + ext

    def fetchUrls(self, url, data, urlSearch):
        imageUrls = super(Ryugou, self).fetchUrls(url, data, urlSearch)
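        # Strip 1-3 matches more than one image; keep only the second one.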
        if url == self.stripUrl % '1-3':
            imageUrls = [imageUrls[1]]
        return imageUrls