Add MangaDex site engine (#159)
commit 595c49ba53
parent 84f4a81cf5
1 changed file with 87 additions and 0 deletions
dosagelib/plugins/mangadex.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json

from ..scraper import _ParserScraper


class MangaDex(_ParserScraper):
    imageSearch = '//img[contains(@class, "_images")]/@data-url'
    prevSearch = '//a[contains(@class, "_prevEpisode")]'
    multipleImagesPerStrip = True

    def __init__(self, name, mangaid):
        super(MangaDex, self).__init__('MangaDex/' + name)

        baseUrl = 'https://mangadex.org/api/'
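        # self.url points at the manga metadata; stripUrl is a template
        # that is later filled in with a chapter ID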
        self.url = baseUrl + '?id=%s&type=manga' % str(mangaid)
        self.stripUrl = baseUrl + '?id=%s&type=chapter'

    def starter(self):
        # Retrieve manga metadata from API
        manga = self.session.get(self.url)
        manga.raise_for_status()
        mangaData = manga.json()

        # Determine if manga is complete and/or adult
        if mangaData['manga']['last_chapter'] != '0':
            self.endOfLife = True
        if mangaData['manga']['hentai'] != '0':
            self.adult = True

        # Prepare chapter list
        self.chapters = []
        for ch in mangaData['chapter']:
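            # The API lists chapters newest-first; keep English ('gb')
            # releases only, and only the first release seen for any
            # given chapter number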
            if mangaData['chapter'][ch]['lang_code'] != 'gb':
                continue
            if len(self.chapters) < 1:
                self.chapters.append(ch)
                continue
            if mangaData['chapter'][ch]['chapter'] == mangaData['chapter'][self.chapters[-1]]['chapter']:
                continue
            if mangaData['chapter'][ch]['chapter'] == '':
                continue
            self.chapters.append(ch)
        self.chapters.reverse()

        # Find first and last chapter
        self.firstStripUrl = self.stripUrl % self.chapters[0]
        return self.stripUrl % self.chapters[-1]

    def getPrevUrl(self, url, data):
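        # Recover this chapter's ID from the API URL, then step one
        # entry back in the (oldest-first) chapter list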
        chapter = url.replace('&type=chapter', '').rsplit('=', 1)[-1]
        return self.stripUrl % self.chapters[self.chapters.index(chapter) - 1]

    def fetchUrls(self, url, data, urlSearch):
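        # data arrives as an lxml-parsed document, so the raw JSON text
        # is recovered from the document tree before decoding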
        # Retrieve chapter metadata from API
        chapterData = json.loads(data.text_content())
        self.chapter = chapterData['chapter']

        # Save link order for position-based filenames
        imageUrl = chapterData['server'] + chapterData['hash'] + '/%s'
        self.imageUrls = [imageUrl % page for page in chapterData['page_array']]
        return self.imageUrls

    def namer(self, imageUrl, pageUrl):
        # Construct filename from episode number and page index in array
        chapterNum = self.chapter
        pageNum = self.imageUrls.index(imageUrl)
        pageExt = imageUrl.rsplit('.')[-1]
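        # e.g. the third image of chapter '12' with extension 'png'
        # yields '12-02.png' (page indices are zero-based)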
        return '%s-%02d.%s' % (chapterNum, pageNum, pageExt)

    @classmethod
    def getmodules(cls):
        return (
            cls('Beastars', 20523),
            cls('DragonDrive', 5165),
            cls('HoriMiya', 6770),
            cls('JingaiNoYomeToIchaIchaSuru', 22651),
            cls('KanojoOkarishimasu', 22151),
            cls('ModernMoGal', 30308),
            cls('OokamiToKoshinryou', 1168),
            cls('OtomeYoukaiZakuro', 4533),
            cls('SaekiSanWaNemutteru', 28834),
            cls('SewayakiKitsuneNoSenkoSan', 22723),
            cls('SwordArtOnline', 1360),
            cls('SwordArtOnlineProgressive', 9604),
            cls('TomoChanWaOnnanoko', 15722),
            cls('TonikakuKawaii', 23439),
            cls('YuYuHakusho', 1738),
        )
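
For reference, a minimal, self-contained sketch (not part of the commit) of the chapter-selection rules starter() applies, run against a hand-written stand-in for the v1 API payload; the chapter IDs, numbers, and language codes below are invented for illustration:

# Stand-in for mangaData['chapter'] (newest release first), invented values
chapters_api = {
    '105': {'lang_code': 'gb', 'chapter': '2'},
    '104': {'lang_code': 'gb', 'chapter': ''},   # oneshot/extra: dropped
    '103': {'lang_code': 'gb', 'chapter': '1'},
    '102': {'lang_code': 'ru', 'chapter': '1'},  # non-English: dropped
    '101': {'lang_code': 'gb', 'chapter': '1'},  # duplicate release: dropped
}

chapters = []
for ch in chapters_api:
    if chapters_api[ch]['lang_code'] != 'gb':
        continue
    if len(chapters) < 1:
        chapters.append(ch)
        continue
    if chapters_api[ch]['chapter'] == chapters_api[chapters[-1]]['chapter']:
        continue
    if chapters_api[ch]['chapter'] == '':
        continue
    chapters.append(ch)
chapters.reverse()

print(chapters)  # ['103', '105'] -- oldest kept chapter first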