Compare commits

...

3 commits

5 changed files with 66 additions and 42 deletions

View file

@@ -80,6 +80,16 @@ class AdventuresOfFifne(_ParserScraper):
return super(AdventuresOfFifne, self).getPrevUrl(url, data) return super(AdventuresOfFifne, self).getPrevUrl(url, data)
class AfterlifeBlues(ParserScraper):
    """Scraper for Afterlife Blues, hosted on project-apollo.net.

    Sibling comic of A Miracle of Science on the same site; pages are
    numbered flat HTML files under the 'ab/' directory.
    """
    baseUrl = 'https://project-apollo.net/'
    # Final page of the comic (it has concluded).
    url = baseUrl + 'ab/ab213.html'
    # FIX: the first strip lives under 'ab/', not 'mos/' — the 'mos/'
    # prefix belongs to the sibling scraper AMiracleOfScience and was
    # copy-pasted here by mistake.
    firstStripUrl = baseUrl + 'ab/ab000.html'
    # Page images are served from a 'manga/' asset directory.
    imageSearch = '//img[contains(@src, "manga/")]'
    prevSearch = '//a[img/@alt="Previous Page"]'
    nextSearch = '//a[img/@alt="Next Page"]'
    # Comic is finished; disable update checks that expect new pages.
    endOfLife = True
class AfterStrife(WordPressNavi): class AfterStrife(WordPressNavi):
baseUrl = 'http://afterstrife.com/' baseUrl = 'http://afterstrife.com/'
stripUrl = baseUrl + '?p=%s' stripUrl = baseUrl + '?p=%s'
@@ -252,6 +262,16 @@ class AmbersNoBrainers(_ParserScraper):
return self.stripUrl % str(pageNum - 1) return self.stripUrl % str(pageNum - 1)
class AMiracleOfScience(ParserScraper):
    """Scraper for A Miracle of Science, hosted on project-apollo.net.

    Pages are numbered flat HTML files under the 'mos/' directory.
    """
    baseUrl = 'https://project-apollo.net/'
    # Final page of the comic (it has concluded).
    url = baseUrl + 'mos/mos435.html'
    firstStripUrl = baseUrl + 'mos/mos000.html'
    # Page images are served from a 'manga/' asset directory.
    imageSearch = '//img[contains(@src, "manga/")]'
    prevSearch = '//a[img/@alt="Previous Page"]'
    nextSearch = '//a[img/@alt="Next Page"]'
    # Comic is finished; disable update checks that expect new pages.
    endOfLife = True
class Amya(WordPressScraper): class Amya(WordPressScraper):
url = 'http://www.amyachronicles.com/' url = 'http://www.amyachronicles.com/'

View file

@@ -1,55 +1,49 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: © 2019 Tobias Gruetzmacher # SPDX-FileCopyrightText: © 2019 Tobias Gruetzmacher
# SPDX-FileCopyrightText: © 2019 Daniel Ring # SPDX-FileCopyrightText: © 2019 Daniel Ring
import itertools
from ..scraper import ParserScraper from ..scraper import ParserScraper
from ..helpers import indirectStarter from ..helpers import indirectStarter, joinPathPartsNamer
class Derideal(ParserScraper): class Derideal(ParserScraper):
baseUrl = 'https://www.derideal.com/' baseUrl = 'https://derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]' imageSearch = '//img[d:class("comic-page") or d:class("comic-pag")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]' prevSearch = '//a[text()="<"]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]' starter = indirectStarter
namer = joinPathPartsNamer(imageparts=range(-3, 0))
def __init__(self, name, sub, first, last=None): def __init__(self, name, lang, sub, first, eol=False, multi=False):
if name == 'Derideal': if lang == 'en':
super().__init__(name) base = 'Derideal'
lateststr = 'Read latest update'
else: else:
super().__init__('Derideal/' + name) base = 'DeridealSpanish'
sub = f'{lang}/{sub}'
lateststr = 'Leer última actualización'
self.url = self.baseUrl + sub if not name:
self.stripUrl = self.url + '/%s/' super().__init__(base)
self.firstStripUrl = self.stripUrl % first else:
self.startUrl = self.firstStripUrl super().__init__(f'{base}/{name}')
if last: self.url = f'{self.baseUrl}{sub}'
self.endOfLife = True self.firstStripUrl = f'{self.url}/{first}/'
self.latestSearch = f'//a[contains(text(), "{lateststr}")]'
def starter(self): self.lang = lang
indexPage = self.getPage(self.url) self.endOfLife = eol
self.chapters = self.match(indexPage, '//a[contains(text(), "Read this episode")]/@href') self.multipleImagesPerStrip = multi
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod @classmethod
def getmodules(cls): def getmodules(cls):
return ( return itertools.chain.from_iterable((
cls('Derideal', 'derideal', 'cover-prime'), cls('', lang, 'derideal', 'chimeras-cover'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'), cls('Legacy', lang, 'derideal-legacy', 'the-dream-cover', eol=True),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'), cls('LostMemories', lang, 'lost-memories', 'lost-memories-pixi', multi=True),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'), cls('Nova', lang, 'nova', 'xen-prelude-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'), cls('ProjectPrime', lang, 'project-prime', 'custus-part-i-cover'),
cls('TheVoid', 'the-void', 'the-void-cover'), cls('PurpurinaEffect', lang, 'purpurina-effect', 'purpurina-effect-cover'),
) cls('RLE', lang, 'RLE', 'the-leyend-of-the-rose-cover'),
cls('TheVoid', lang, 'the-void', 'the-void-cover'),
) for lang in ('en', 'es'))

View file

@@ -54,6 +54,16 @@ class LeastICouldDo(ParserScraper):
help = 'Index format: yyyymmdd' help = 'Index format: yyyymmdd'
class LeifAndThorn(ParserScraper):
    """Scraper for the Leif & Thorn webcomic (WordPress-based site)."""
    url = 'https://leifandthorn.com/'
    # Strips are addressed by slug: /comic/<slug>/
    stripUrl = url + 'comic/%s/'
    firstStripUrl = stripUrl % 'magical-comic-lyrical-test-post'
    # d:class() is the project's XPath helper for matching a CSS class.
    imageSearch = '//img[d:class("attachment-full")]'
    prevSearch = '//a[d:class("previous-webcomic-link")]'
    nextSearch = '//a[d:class("next-webcomic-link")]'
    # bounceStarter: find the latest strip by following next-links from
    # the landing page (helper defined elsewhere in the project).
    starter = bounceStarter
class LetsSpeakEnglish(ComicControlScraper): class LetsSpeakEnglish(ComicControlScraper):
url = 'http://www.marycagle.com' url = 'http://www.marycagle.com'

View file

@@ -1748,6 +1748,7 @@ class Renamed(Scraper):
# Renamed in 3.1 # Renamed in 3.1
cls('ComicsKingdom/SlylockFoxAndComicsForKids', 'ComicsKingdom/SlylockFox'), cls('ComicsKingdom/SlylockFoxAndComicsForKids', 'ComicsKingdom/SlylockFox'),
cls('ComicsKingdom/SlylockFoxAndComicsForKidsSpanish', 'ComicsKingdom/SlylockFoxSpanish'), cls('ComicsKingdom/SlylockFoxAndComicsForKidsSpanish', 'ComicsKingdom/SlylockFoxSpanish'),
cls('Derideal/LRE', 'Derideal/RLE'),
cls('Exiern', 'ComicFury/Exiern'), cls('Exiern', 'ComicFury/Exiern'),
cls('MaxOveracts', 'OccasionalComicsDisorder'), cls('MaxOveracts', 'OccasionalComicsDisorder'),
cls('SafelyEndangered', 'WebToons/SafelyEndangered'), cls('SafelyEndangered', 'WebToons/SafelyEndangered'),

View file

@@ -19,7 +19,6 @@
{ {
devShells.default = pkgs.mkShell { devShells.default = pkgs.mkShell {
buildInputs = [ buildInputs = [
pkgs.cowsay
pkgs.alejandra pkgs.alejandra
pkgs.python310Full pkgs.python310Full
]; ];