Convert starters and other helpers to new interface.

This allows those starters to work with future scrapers.
Tobias Gruetzmacher 2014-07-23 20:53:59 +02:00
parent 4265053846
commit 2567bd4e57
7 changed files with 37 additions and 37 deletions
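
Every hunk below follows the same pattern: the module-level helpers getPageContent(url, cls.session), which returned a (data, baseUrl) tuple, and fetchUrl(url, data, baseUrl, pattern) are replaced by methods on the scraper class itself. A minimal before/after sketch of the call style, assuming getPage and fetchUrl are provided by the scraper base class:

    # old interface: free functions, the caller threads session and baseUrl through
    data, baseUrl = getPageContent(url, cls.session)
    link = fetchUrl(url, data, baseUrl, cls.prevSearch)

    # new interface: the scraper class handles session and base URL itself
    data = cls.getPage(url)
    link = cls.fetchUrl(url, data, cls.prevSearch)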

View file

@@ -1,7 +1,7 @@
 # -*- coding: iso-8859-1 -*-
 # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-from .util import fetchUrl, getPageContent, getQueryParams
+from .util import getQueryParams

 def queryNamer(paramName, usePageUrl=False):
     """Get name from URL query part."""
@@ -30,10 +30,10 @@ def bounceStarter(url, nextSearch):
     @classmethod
     def _starter(cls):
         """Get bounced start URL."""
-        data, baseUrl = getPageContent(url, cls.session)
-        url1 = fetchUrl(url, data, baseUrl, cls.prevSearch)
-        data, baseUrl = getPageContent(url1, cls.session)
-        return fetchUrl(url1, data, baseUrl, nextSearch)
+        data = cls.getPage(url)
+        url1 = cls.fetchUrl(url, data, cls.prevSearch)
+        data = cls.getPage(url1)
+        return cls.fetchUrl(url1, data, nextSearch)
     return _starter
@@ -42,6 +42,6 @@ def indirectStarter(url, latestSearch):
     @classmethod
     def _starter(cls):
         """Get indirect start URL."""
-        data, baseUrl = getPageContent(url, cls.session)
-        return fetchUrl(url, data, baseUrl, latestSearch)
+        data = cls.getPage(url)
+        return cls.fetchUrl(url, data, latestSearch)
     return _starter
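
Both factories return a classmethod, so a comic module can assign the result directly to its scraper class. A hypothetical usage sketch (the class name, URL, and search patterns are invented for illustration):

    from re import compile
    from ..scraper import _BasicScraper
    from ..helpers import bounceStarter
    from ..util import tagre

    class ExampleComic(_BasicScraper):
        # invented URL and link patterns, for illustration only
        url = 'http://www.example.com/comics/'
        prevSearch = compile(tagre("a", "href", r'([^"]+)', before="prev"))
        nextSearch = compile(tagre("a", "href", r'([^"]+)', before="next"))
        # start from the newest strip: hop back once via prevSearch,
        # then forward again via nextSearch to land on the real front page
        starter = bounceStarter(url, nextSearch)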

View file

@@ -3,7 +3,7 @@
 # Copyright (C) 2012-2014 Bastian Kleineidam
 from re import compile
 from ..scraper import make_scraper
-from ..util import tagre, getQueryParams, fetchUrl, getPageContent
+from ..util import tagre, getQueryParams

 _linkTag = tagre("a", "href", r'([^"]+)')
@@ -25,15 +25,15 @@ def add(name, shortName, imageFolder=None, lastStrip=None):
     @classmethod
     def _starter(cls):
         # first, try hopping to previous and next comic
-        data, _baseUrl = getPageContent(baseUrl, cls.session)
+        data = cls.getPage(baseUrl)
         try:
-            url = fetchUrl(baseUrl, data, _baseUrl, _prevSearch)
+            url = cls.fetchUrl(baseUrl, data, _prevSearch)
         except ValueError:
             # no previous link found, try hopping to last comic
-            return fetchUrl(baseUrl, data, _baseUrl, _lastSearch)
+            return cls.fetchUrl(baseUrl, data, _lastSearch)
         else:
-            data, _baseUrl = getPageContent(url, cls.session)
-            return fetchUrl(url, data, _baseUrl, _nextSearch)
+            data = cls.getPage(url)
+            return cls.fetchUrl(url, data, _nextSearch)

     attrs = dict(
         name='CloneManga/' + name,

View file

@@ -4,7 +4,7 @@
 from re import compile
 from ..scraper import make_scraper, Genre
-from ..util import tagre, fetchUrl, getPageContent
+from ..util import tagre

 # note: adding the compile() functions inside add() is a major performance hog
 _imageSearch = compile(tagre("img", "src", r'(https://s3\.amazonaws\.com/media\.drunkduck\.com/[^"]+)', before="page-image"))
@@ -27,15 +27,15 @@ def add(name, path):
     @classmethod
     def _starter(cls):
         # first, try hopping to previous and next comic
-        data, baseUrl = getPageContent(_url, cls.session)
+        data = cls.getPage(_url)
         try:
-            url = fetchUrl(_url, data, baseUrl, _prevSearch)
+            url = cls.fetchUrl(_url, data, _prevSearch)
         except ValueError:
             # no previous link found, try hopping to last comic
-            return fetchUrl(_url, data, baseUrl, _lastSearch)
+            return cls.fetchUrl(_url, data, _lastSearch)
         else:
-            data, baseUrl = getPageContent(url, cls.session)
-            return fetchUrl(url, data, baseUrl, _nextSearch)
+            data = cls.getPage(url)
+            return cls.fetchUrl(url, data, _nextSearch)

     attrs = dict(
         name = 'DrunkDuck/' + name,

View file

@@ -3,7 +3,7 @@
 from re import compile, escape
 from ..scraper import _BasicScraper
-from ..util import tagre, getPageContent, fetchUrls
+from ..util import tagre
 from ..helpers import bounceStarter
@@ -21,9 +21,9 @@ class HagarTheHorrible(_BasicScraper):
     def starter(cls):
         """Return last gallery link."""
         url = 'http://www.hagardunor.net/comics.php'
-        content = getPageContent(url, cls.session)[0]
+        data = cls.getPage(url)
         pattern = compile(tagre("a", "href", cls.prevUrl))
-        for starturl in fetchUrls(url, content, url, pattern):
+        for starturl in cls.fetchUrls(url, data, pattern):
             pass
         return starturl
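
A side note on the loop above: fetchUrls returns every match of the pattern in page order, and iterating with a bare pass simply leaves the last match bound to starturl. A tiny standalone illustration of the idiom (the values are made up):

    links = ['gallery1', 'gallery2', 'gallery3']
    for starturl in links:
        pass
    # after the loop, starturl holds the final element
    assert starturl == 'gallery3'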

View file

@@ -5,7 +5,7 @@
 from re import compile, escape
 from ..scraper import _BasicScraper
 from ..helpers import bounceStarter, queryNamer, indirectStarter
-from ..util import tagre, fetchUrl, getPageContent
+from ..util import tagre

 class PandyLand(_BasicScraper):
@@ -104,10 +104,10 @@ class PennyArcade(_BasicScraper):
     @classmethod
     def starter(cls):
         """Get bounced start URL."""
-        data, baseUrl = getPageContent(cls.url, cls.session)
-        url1 = fetchUrl(cls.url, data, baseUrl, cls.prevSearch)
-        data, baseUrl = getPageContent(url1, cls.session)
-        url2 = fetchUrl(url1, data, baseUrl, cls.nextSearch)
+        data = cls.getPage(cls.url)
+        url1 = cls.fetchUrl(cls.url, data, cls.prevSearch)
+        data = cls.getPage(url1)
+        url2 = cls.fetchUrl(url1, data, cls.nextSearch)
         return cls.prevUrlModifier(url2)

     @classmethod

View file

@@ -3,7 +3,7 @@
 # Copyright (C) 2012-2014 Bastian Kleineidam
 from re import compile
 from ..scraper import make_scraper
-from ..util import tagre, quote, fetchUrl, case_insensitive_re, getPageContent
+from ..util import tagre, quote, case_insensitive_re

 # SmackJeeves is a crawlers nightmare - users are allowed to edit HTML directly.
 # That's why there are so much different search patterns.
@@ -45,11 +45,11 @@ def add(name, url, description, adult, bounce):
     def _starter(cls):
         """Get start URL."""
         url1 = modifier(url)
-        data, baseUrl = getPageContent(url1, cls.session)
-        url2 = fetchUrl(url1, data, baseUrl, cls.prevSearch)
+        data = cls.getPage(url1)
+        url2 = cls.fetchUrl(url1, data, cls.prevSearch)
         if bounce:
-            data, baseUrl = getPageContent(url2, cls.session)
-            url3 = fetchUrl(url2, data, baseUrl, _nextSearch)
+            data = cls.getPage(url2)
+            url3 = cls.fetchUrl(url2, data, _nextSearch)
             return modifier(url3)
         return modifier(url2)

View file

@@ -5,7 +5,7 @@
 from re import compile, escape, IGNORECASE
 from ..scraper import _BasicScraper
 from ..helpers import indirectStarter
-from ..util import tagre, fetchUrl, getPageContent
+from ..util import tagre

 class TheBrads(_BasicScraper):
@@ -223,11 +223,11 @@ class TheThinHLine(_BasicScraper):
     indirectImageSearch = compile(tagre('a', 'href', r'(%simage/\d+)' % rurl))

-    def getComicStrip(self, url, data, baseUrl):
+    def getComicStrip(self, url, data):
         """The comic strip image is in a separate page."""
-        pageUrl = fetchUrl(url, data, baseUrl, self.indirectImageSearch)
-        pageData, pageBaseUrl = getPageContent(pageUrl, self.session)
-        return super(TheThinHLine, self).getComicStrip(pageUrl, pageData, pageBaseUrl)
+        pageUrl = self.fetchUrl(url, data, self.indirectImageSearch)
+        pageData = self.getPage(pageUrl)
+        return super(TheThinHLine, self).getComicStrip(pageUrl, pageData)

     @classmethod
     def namer(cls, imageUrl, pageUrl):