# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2021 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from re import compile, escape

from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import indirectStarter, bounceStarter
from ..util import tagre
from .common import _ComicControlScraper, _WordPressScraper, _WPNavi, _WPWebcomic


class Namesake(_ComicControlScraper):
    url = 'http://namesakecomic.com/'
    stripUrl = url + 'comic/%s'
    firstStripUrl = stripUrl % 'the-journey-begins'


class NatalieDee(_BasicScraper):
    url = 'http://www.nataliedee.com/'
    rurl = escape(url)
    stripUrl = url + '%s'
    firstStripUrl = stripUrl % '022806'
    imageSearch = compile(tagre("img", "src", r'(%s\d+/[^"]+)' % rurl,
                                before="overflow"))
    prevSearch = compile(tagre("a", "href", r'([^"]+)') + "<< Yesterday")
    help = 'Index format: mmddyy'
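
    # Image URLs end in <date>/<filename>; the namer below prefixes the date
    # directory so saved files keep a unique, date-based name.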
    def namer(self, image_url, page_url):
        unused, date, filename = image_url.rsplit('/', 2)
        return '%s-%s' % (date, filename)


class Nedroid(_WordPressScraper):
    url = 'http://nedroid.com/'
    prevSearch = '//a[@rel="prev"]'


class NeoCTC(_ParserScraper):
    url = 'http://www.hirezfox.com/neoctc/'
    stripUrl = url + 'd/%s.html'
    firstStripUrl = stripUrl % '20071205'
    imageSearch = '//img[contains(@src, "neoctc/comics")]'
    prevSearch = '//a[./img[@alt="Previous Day"]]'
    multipleImagesPerStrip = True


class NeoEarth(_BasicScraper):
    url = 'http://www.neo-earth.com/NE/'
    stripUrl = url + 'index.php?date=%s'
    firstStripUrl = stripUrl % '2007-03-23'
    imageSearch = compile(r'<img src="(strips/.+?)"')
    prevSearch = compile(r'<a href="(.+?)">Previous</a>')
    help = 'Index format: yyyy-mm-dd'


class NerfNow(_WordPressScraper):
    url = 'https://www.nerfnow.com/'
    prevSearch = '//li[@id="nav_previous"]/a'


class Newshounds(_ParserScraper):
    stripUrl = 'http://www.newshounds.com/%s.html'
    url = stripUrl % 'nh2/20140929'
    firstStripUrl = stripUrl % 'nh1/19971101'
    imageSearch = '//img[@class="ksc"]'
    prevSearch = '//a[./img[@alt="Previous comic"]]'
    endOfLife = True

    def getPrevUrl(self, url, data):
        # Add navigation link between comic and graphic novel
        if url == self.stripUrl % 'nh2/20070201':
            return self.stripUrl % 'nh1/20061208'
        return super(Newshounds, self).getPrevUrl(url, data)


class NewWorld(_WordPressScraper):
    url = ('https://web.archive.org/web/20190718012133/'
           'http://www.tfsnewworld.com/')
    stripUrl = url + '%s/'
    firstStripUrl = stripUrl % '2007/08/30/63'
    prevSearch = '//a[@rel="prev"]'
    endOfLife = True
    help = 'Index format: yyyy/mm/dd/stripn'


class NeverSatisfied(_ComicControlScraper):
    url = 'https://www.neversatisfiedcomic.com/'
    stripUrl = url + 'comic/%s'
    firstStripUrl = stripUrl % 'never-satisfied'


class NichtLustig(_BasicScraper):
    url = 'https://joscha.com/'
    starter = bounceStarter
    stripUrl = url + 'nichtlustig/%s/'
    firstStripUrl = stripUrl % '000501'
    lang = 'de'
    imageSearch = compile(tagre("img", "src", r'(https://joscha.com/data/media/cartoons/[0-9a-f-_]+.png)'))
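    # Note: the 'after' markers below are crossed on purpose; this presumably
    # mirrors the site's navigation markup, where the link labelled "next"
    # points to the older strip.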
    prevSearch = compile(tagre("a", "href", r'(https://joscha.com/nichtlustig/\d+/)', after="next"))
    nextSearch = compile(tagre("a", "href", r'(https://joscha.com/nichtlustig/\d+/)', after="prev"))
    help = 'Index format: yymmdd'

    def namer(self, image_url, page_url):
        # Name the image after the strip's date slug in the page URL
        unused, filename, unused2 = page_url.rsplit('/', 2)
        return '%s' % (filename)


class Nicky510(_WPNavi):
    url = ('https://web.archive.org/web/20160510215718/'
           'http://www.nickyitis.com/')
    endOfLife = True


class Nightshift(_WPWebcomic):
    url = 'https://poecatcomix.com/nightshift-static/'
    stripUrl = 'https://poecatcomix.com/nightshift/%s/'
    firstStripUrl = stripUrl % 'ns-cover'
    imageSearch = '//div[contains(@class, "webcomic-media")]//img'
    adult = True

    def starter(self):
        # Build list of chapters for naming
        indexPage = self.getPage(self.url)
        self.chapters = indexPage.xpath('//a[./img[contains(@class, "attachment-large")]]/@href')
        latestPage = self.chapters[0]
        self.chapters = self.chapters[1:]
        self.currentChapter = len(self.chapters)
        return latestPage
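
    # The index page lists chapters newest first, so currentChapter counts
    # down as the backwards traversal reaches each older chapter's start page.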
    def namer(self, imageUrl, pageUrl):
        page = pageUrl.rstrip('/').rsplit('/', 1)[-1]
        # Normalize inconsistent page slugs
        page = page.replace('blood-brothers', 'bloodbrothers').replace('bb-2', 'bb2').replace('ns7-', 'page-')
        filename = 'ns%d-%s.%s' % (self.currentChapter, page, imageUrl.rsplit('.', 1)[-1])
        if pageUrl in self.chapters:
            self.currentChapter = self.currentChapter - 1
        return filename


class Nimona(_ParserScraper):
    url = ('https://web.archive.org/web/20141008095502/'
           'http://gingerhaze.com/nimona/')
    stripUrl = url + 'comic/%s'
    firstStripUrl = stripUrl % "page-1"
    imageSearch = '//div[d:class("field-name-field-comic-page")]//img'
    prevSearch = '//a[img[contains(@src, "/comicdrop_prev_label")]]'
    endOfLife = True


class NineToNine(_ParserScraper):
    url = 'https://www.tigerknight.com/99'
    stripUrl = url + '/%s'
    firstStripUrl = stripUrl % '2014-01-01'
    imageSearch = '//img[@class="comic-image"]'
    prevSearch = '//a[./span[contains(text(), "Previous")]]'
    multipleImagesPerStrip = True


class NobodyScores(_BasicScraper):
    url = 'http://nobodyscores.loosenutstudio.com/'
    rurl = escape(url)
    stripUrl = url + 'index.php?id=%s'
    firstStripUrl = stripUrl % '4'
    imageSearch = compile(tagre("img", "src", r'(%scomix/[^"]+)' % rurl))
    multipleImagesPerStrip = True
    prevSearch = compile(r'<a href="(%sindex.php.+?)">the one before </a>' % rurl)
    help = 'Index format: nnn'


class NoNeedForBushido(_ParserScraper):
    url = 'http://nn4b.com/'
    stripUrl = url + 'comic/%s'
    imageSearch = '//div[@id="comic-image"]//img'
    prevSearch = '//a[@rel="prev"]'
    help = 'Index format: nnn'


class NonPlayerCharacter(_ParserScraper):
    url = 'https://www.lfg.co/'
    stripUrl = url + 'npc/tale/%s/'
    firstStripUrl = stripUrl % '1-1'
    imageSearch = '//div[@id="comic-img"]//img'
    prevSearch = '//a[@class="comic-nav-prev"]'
    latestSearch = '//div[@id="feature-npc-footer"]/a[contains(@href, "npc/tale/")]'
    starter = indirectStarter
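
    # Use the tale's page slug from the URL (e.g. '1-1') as the file name.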
    def namer(self, imageUrl, pageUrl):
        return pageUrl.rstrip('/').rsplit('/', 1)[-1]


class NotAVillain(_WPWebcomic):
    url = 'http://navcomic.com/'
    stripUrl = url + 'not-a-villain/%s/'
    firstStripUrl = stripUrl % 'v1-001'

    def namer(self, imageUrl, pageUrl):
        filename = imageUrl.rsplit('/', 1)[-1]
        # Fix filenames missing "Page"
        if filename[2].isdigit():
            filename = filename[0] + '-Page' + filename[2:]
        # Fix filenames of early comics
        filename = filename.replace('Page-', '1-Page')
        if filename.startswith('0-Page'):
            filename = '1' + filename[1:]
        return filename


class NotInventedHere(_ParserScraper):
    url = 'http://notinventedhe.re/'
    stripUrl = url + 'on/%s'
    firstStripUrl = stripUrl % '2009-9-21'
    imageSearch = '//div[@id="comic-content"]//img'
    prevSearch = '//a[@id="nav-previous"]'
    help = 'Index format: yyyy-m-d'


class Nukees(_BasicScraper):
    url = 'http://www.nukees.com/'
    stripUrl = url + 'd/%s'
    firstStripUrl = stripUrl % '19970121'
    imageSearch = compile(r'"comic".+?"(/comics/.+?)"')
    prevSearch = compile(r'"(/d/.+?)".+?previous')
    help = 'Index format: yyyymmdd.html'