dosage/dosagelib/scraper.py

# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

import time
import random
import os
import re

try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin

try:
    from lxml import html
    from lxml.html.defs import link_attrs as html_link_attrs
except ImportError:
    html = None

try:
    import cssselect
except ImportError:
    cssselect = None

try:
    import pycountry
except ImportError:
    pycountry = None

from . import loader, configuration, languages
from .util import (get_page, makeSequence, get_system_uid, urlopen, getDirname,
                   unescape, tagre, normaliseURL, prettyMatcherList,
                   requests_session)
from .comic import ComicStrip
from .output import out
from .events import getHandler


class Scraper(object):
    '''Base class for all comic scrapers, but without a specific scrape
    implementation.'''

    # The URL for the comic strip
    url = None

    # A string that is interpolated with the strip index to yield the URL for
    # a particular strip.
    stripUrl = None

    # Stop the search for previous URLs at this URL
    firstStripUrl = None

    # set to True if more than one image per URL is expected
    multipleImagesPerStrip = False

    # set to True if this comic contains adult content
    adult = False

    # set to True if this comic will not get updated anymore
    endOfLife = False

    # language of the comic (two-letter ISO 639-1 code)
    lang = 'en'

    # an expression that will locate the URL for the previous strip in a page
    # this can also be a list or tuple
    prevSearch = None

    # an expression that will locate the strip image URLs in a page
    # this can also be a list or tuple
    imageSearch = None

    # an expression to store a text together with the image
    # sometimes comic strips have additional text info for each comic
    textSearch = None

    # Is the additional text required or optional? When it is required (the
    # default), an error message is shown whenever a comic page is
    # encountered that does not have the text.
    textOptional = False

    # usually the index format help
    help = ''

    # HTTP session for configuration & cookies
    session = requests_session()

    @property
    def indexes(self):
        return self._indexes

    @indexes.setter
    def indexes(self, val):
        if val:
            self._indexes = tuple(sorted(val))

    def __init__(self):
        """Initialize internal variables."""
        self.urls = set()
        self._indexes = tuple()
        self.skippedUrls = set()
        self.hitFirstStripUrl = False

    def __cmp__(self, other):
        """Compare scraper by name and index list (used by Python 2 only)."""
        if not isinstance(other, Scraper):
            return 1
        # first, order by name
        d = cmp(self.name, other.name)
        if d != 0:
            return d
        # then by indexes
        return cmp(self.indexes, other.indexes)

    def __hash__(self):
        """Get hash value from name and index list."""
        return hash((self.name, self.indexes))

    def shouldSkipUrl(self, url, data):
        """Determine if the search for images in the given URL should be
        skipped."""
        return False
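
    # A comic module can override shouldSkipUrl() to skip pages that are
    # known to contain no comic image. A minimal sketch (the URL index below
    # is hypothetical, not taken from a real module):
    #
    #     def shouldSkipUrl(self, url, data):
    #         """Skip pages without images."""
    #         return url == self.stripUrl % 'guest-week-announcement'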

    def getComicStrip(self, url, data):
        """Get comic strip downloader for given URL and data."""
        imageUrls = self.fetchUrls(url, data, self.imageSearch)
        # map modifier function on image URLs
        imageUrls = [self.imageUrlModifier(x, data) for x in imageUrls]
        # remove duplicate URLs
        imageUrls = set(imageUrls)
        if len(imageUrls) > 1 and not self.multipleImagesPerStrip:
            out.warn(
                u"Found %d images instead of 1 at %s with expressions %s" %
                (len(imageUrls), url, prettyMatcherList(self.imageSearch)))
            image = sorted(imageUrls)[0]
            out.warn(u"Choosing image %s" % image)
            imageUrls = (image,)
        elif not imageUrls:
            out.warn(u"Found no images at %s with expressions %s" % (url,
                     prettyMatcherList(self.imageSearch)))
        if self.textSearch:
            text = self.fetchText(url, data, self.textSearch,
                                  optional=self.textOptional)
        else:
            text = None
        return ComicStrip(self.name, url, imageUrls, self.namer,
                          self.session, text=text)

    def getStrips(self, maxstrips=None):
        """Get comic strips."""
        if maxstrips:
            word = u"strip" if maxstrips == 1 else u"strips"
            msg = u'Retrieving %d %s' % (maxstrips, word)
        else:
            msg = u'Retrieving all strips'
        if self.indexes:
            if len(self.indexes) == 1:
                msg += u" for index %s" % self.indexes[0]
            else:
                msg += u" for indexes %s" % self.indexes
            # Always call starter() since it might initialize cookies.
            # See for example the Oglaf comic.
            self.starter()
            urls = [self.getIndexStripUrl(index) for index in self.indexes]
        else:
            urls = [self.starter()]
        if self.adult:
            msg += u" (including adult content)"
        out.info(msg)
        for url in urls:
            for strip in self.getStripsFor(url, maxstrips):
                yield strip

    def getStripsFor(self, url, maxstrips):
        """Get comic strips for a URL. If maxstrips is a positive number,
        stop after retrieving the given number of strips."""
        self.hitFirstStripUrl = False
        seen_urls = set()
        while url:
            out.info(u'Get strip URL %s' % url, level=1)
            data = self.getPage(url)
            if self.shouldSkipUrl(url, data):
                out.info(u'Skipping URL %s' % url)
                self.skippedUrls.add(url)
            else:
                try:
                    yield self.getComicStrip(url, data)
                except ValueError as msg:
                    # image not found
                    out.exception(msg)
            if self.firstStripUrl == url:
                out.debug(u"Stop at first URL %s" % url)
                self.hitFirstStripUrl = True
                break
            if maxstrips is not None:
                maxstrips -= 1
                if maxstrips <= 0:
                    break
            prevUrl = self.getPrevUrl(url, data)
            seen_urls.add(url)
            if prevUrl in seen_urls:
                # avoid recursive URL loops
                out.warn(u"Already seen previous URL %r" % prevUrl)
                break
            url = prevUrl
            if url:
                # wait between 1 and 2 seconds before fetching the next URL
                time.sleep(1.0 + random.random())

    def getPrevUrl(self, url, data):
        """Find previous URL."""
        prevUrl = None
        if self.prevSearch:
            try:
                prevUrl = self.fetchUrl(url, data, self.prevSearch)
            except ValueError as msg:
                # assume there is no previous URL, but print a warning
                out.warn(u"%s Assuming no previous comic strips exist." % msg)
            else:
                prevUrl = self.prevUrlModifier(prevUrl)
                out.debug(u"Found previous URL %s" % prevUrl)
                getHandler().comicPageLink(self.name, url, prevUrl)
        return prevUrl

    def getIndexStripUrl(self, index):
        """Get comic strip URL from index."""
        return self.stripUrl % index
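
    # For example, with a hypothetical
    # stripUrl = 'http://comic.example.com/archive/%s',
    # getIndexStripUrl('100') yields 'http://comic.example.com/archive/100'.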

    @property
    def name(self):
        """Get scraper name."""
        return self.__class__.__name__

    def starter(self):
        """Get starter URL from where to scrape comic strips."""
        return self.url

    def namer(self, image_url, page_url):
        """Return filename for given image and page URL."""
        return None
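
    # Comic modules can override namer() to build more meaningful filenames.
    # A minimal sketch (purely illustrative, not from a real module): derive
    # the name from the last path components of the page and image URLs.
    #
    #     def namer(self, image_url, page_url):
    #         """Use page slug plus image name as filename."""
    #         slug = page_url.rsplit('/', 1)[-1]
    #         image = image_url.rsplit('/', 1)[-1]
    #         return '%s-%s' % (slug, image)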

    def prevUrlModifier(self, prev_url):
        """Optional modification of parsed previous URLs. Useful if
        there are domain redirects. The default implementation does
        not modify the URL.
        """
        return prev_url

    def imageUrlModifier(self, image_url, data):
        """Optional modification of parsed image URLs. Useful if the URL
        needs to be fixed before usage. The default implementation does
        not modify the URL. The given data is the URL page data.
        """
        return image_url

    def vote(self):
        """Cast a public vote for this comic."""
        url = configuration.VoteUrl + 'count/'
        uid = get_system_uid()
        data = {"name": self.name.replace('/', '_'), "uid": uid}
        page = urlopen(url, self.session, data=data)
        return page.text

    def getCompleteFile(self, basepath):
        """Get filename indicating all comics are downloaded."""
        dirname = getDirname(self.name)
        return os.path.join(basepath, dirname, "complete.txt")

    def isComplete(self, basepath):
        """Check if all comics are downloaded."""
        return os.path.isfile(self.getCompleteFile(basepath))

    def setComplete(self, basepath):
        """Set complete flag for this comic, i.e. all comics are
        downloaded."""
        if self.endOfLife:
            filename = self.getCompleteFile(basepath)
            if not os.path.exists(filename):
                with open(filename, 'w') as f:
                    f.write('All comics should be downloaded here.')

    def getPage(self, url):
        """
        Fetch a page and return the opaque representation for the data
        parameter of fetchUrls and fetchText.

        Implementation notes: While this base class does not restrict how the
        returned data is structured, subclasses (specific scrapers) should
        specify how this data works, since the structure is passed into
        different methods which can be defined by comic modules and these
        methods should be able to use the data if they so desire... (Affected
        methods: shouldSkipUrl, imageUrlModifier)
        """
        raise ValueError("No implementation for getPage!")

    def fetchUrls(self, url, data, urlsearch):
        """Find all URLs matching urlsearch in the page data."""
        raise ValueError("No implementation for fetchUrls!")

    def fetchUrl(self, url, data, urlsearch):
        """Find the first URL matching urlsearch in the page data."""
        return self.fetchUrls(url, data, urlsearch)[0]

    def fetchText(self, url, data, textsearch, optional):
        """Find text matching textsearch in the page data."""
        raise ValueError("No implementation for fetchText!")

    def getDisabledReasons(self):
        """
        Get a dict of reasons why this comic module is disabled. The key is a
        short (unique) identifier, the value is a string explaining why the
        module is deactivated. If the module is not disabled, just return an
        empty dict.
        """
        return {}

    def language(self):
        """
        Return the language of the comic as a human-readable language name
        instead of a two-letter ISO 639-1 code.
        """
        lang = 'Unknown (%s)' % self.lang
        if pycountry is None:
            if self.lang in languages.Languages:
                lang = languages.Languages[self.lang]
        else:
            try:
                lang = pycountry.languages.get(alpha2=self.lang).name
            except KeyError:
                try:
                    lang = pycountry.languages.get(
                        iso639_1_code=self.lang).name
                except KeyError:
                    pass
        return lang


class _BasicScraper(Scraper):
    """
    Scraper base class that matches regular expressions against HTML pages.

    Subclasses of this scraper should use compiled regular expressions as
    values for prevSearch, imageSearch and textSearch.

    Implementation note: The return value of getPage is a tuple: the first
    element is the raw HTML page text, the second element is the base URL (if
    any).
    """

    BASE_SEARCH = re.compile(tagre("base", "href", '([^"]*)'))

    def getPage(self, url):
        content = get_page(url, self.session).text
        # determine base URL
        baseUrl = None
        match = self.BASE_SEARCH.search(content)
        if match:
            baseUrl = match.group(1)
        else:
            baseUrl = url
        return (content, baseUrl)

    def fetchUrls(self, url, data, urlSearch):
        """Search all entries for given URL pattern(s) in an HTML page."""
        searchUrls = []
        searches = makeSequence(urlSearch)
        for search in searches:
            for match in search.finditer(data[0]):
                searchUrl = match.group(1)
                if not searchUrl:
                    raise ValueError("Pattern %s matched empty URL at %s." %
                                     (search.pattern, url))
                out.debug(u'matched URL %r with pattern %s' %
                          (searchUrl, search.pattern))
                searchUrls.append(normaliseURL(urljoin(data[1], searchUrl)))
            if searchUrls:
                # do not search other links if one pattern matched
                break
        if not searchUrls:
            patterns = [x.pattern for x in searches]
            raise ValueError("Patterns %s not found at URL %s." %
                             (patterns, url))
        return searchUrls

    def fetchText(self, url, data, textSearch, optional):
        """Search text entry for given text pattern in an HTML page."""
        if textSearch:
            match = textSearch.search(data[0])
            if match:
                text = match.group(1)
                out.debug(u'matched text %r with pattern %s' %
                          (text, textSearch.pattern))
                return unescape(text).strip()
            if optional:
                return None
            else:
                raise ValueError("Pattern %s not found at URL %s." %
                                 (textSearch.pattern, url))
        else:
            return None
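
# A minimal sketch of a regex-based comic module (class name, URLs and
# patterns are hypothetical; real modules live in the dosagelib.plugins
# package):
#
#     class HypotheticalComic(_BasicScraper):
#         url = 'http://comic.example.com/'
#         stripUrl = url + 'archive/%s'
#         firstStripUrl = stripUrl % '1'
#         imageSearch = re.compile(
#             tagre("img", "src", r'(http://comic\.example\.com/strips/[^"]+)'))
#         prevSearch = re.compile(
#             tagre("a", "href", r'(http://comic\.example\.com/archive/[^"]+)',
#                   after="prev"))
#         help = 'Index format: number'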


class _ParserScraper(Scraper):
    """
    Scraper base class that uses an HTML parser and XPath expressions.

    All links are resolved before XPath searches are applied, so all URLs are
    absolute!

    Subclasses of this class should use XPath expressions as values for
    prevSearch, imageSearch and textSearch. When the XPath directly selects
    an attribute, it is used as the output.

    All those searches try to do something intelligent when they match a
    complete HTML element: prevSearch and imageSearch try to find a "link
    attribute" and use that as the URL. textSearch strips all tags from the
    content of the HTML element and returns that.
    """

    # Taken directly from LXML
    XML_DECL = re.compile(
        r'^(<\?xml[^>]+)\s+encoding\s*=\s*["\'][^"\']*["\'](\s*\?>|)', re.U)

    NS = {
        "re": "http://exslt.org/regular-expressions"
    }

    # Switch between CSS and XPath selectors for this class. Since CSS needs
    # another Python module, XPath is the default for now.
    css = False
    def getPage(self, url):
        page = get_page(url, self.session)
        if page.encoding:
            # Requests figured out the encoding, so we can deliver Unicode to
            # LXML. Unfortunately, LXML feels betrayed if there is still an
            # XML declaration with a (probably wrong!) encoding at the top of
            # the document. Web browsers ignore such a declaration if the
            # encoding was specified in the HTTP header, and so do we.
            text = self.XML_DECL.sub(r'\1\2', page.text, count=1)
            tree = self._parse_page(text)
        else:
            tree = self._parse_page(page.content)
        tree.make_links_absolute(url)
        return tree

    def _parse_page(self, data):
        tree = html.document_fromstring(data)
        return tree
    def fetchUrls(self, url, data, urlSearch):
        """Search all entries for given XPath in an HTML page."""
        searchUrls = []
        for match, search in self._matchPattern(data, urlSearch):
            searchUrl = None
            try:
                for attrib in html_link_attrs:
                    if attrib in match.attrib:
                        searchUrl = match.get(attrib)
            except AttributeError:
                searchUrl = str(match)
            out.debug(u'Matched URL %r with pattern %s' % (searchUrl, search))
            if searchUrl is not None:
                searchUrls.append(searchUrl)
        if not searchUrls:
            raise ValueError("XPath %s not found at URL %s." %
                             (urlSearch, url))
        return searchUrls

    def fetchText(self, url, data, textSearch, optional):
        """Search text entry for given text XPath in an HTML page."""
        if not textSearch:
            return None
        text = []
        for match, search in self._matchPattern(data, textSearch):
            try:
                text.append(match.text_content())
            except AttributeError:
                text.append(match)
            out.debug(u'Matched text %r with XPath %s' % (text, search))
        text = u' '.join(text)
        if text.strip() == '':
            if optional:
                return None
            else:
                raise ValueError("XPath %s did not match anything at URL %s." %
                                 (textSearch, url))
        return text.strip()
    def _matchPattern(self, data, patterns):
        if self.css:
            searchFun = data.cssselect
        else:
            def searchFun(s):
                return data.xpath(s, namespaces=self.NS)
        patterns = makeSequence(patterns)
        for search in patterns:
            matched = False
            for match in searchFun(search):
                matched = True
                yield match, search
            if matched and not self.multipleImagesPerStrip:
                # do not search other links if one pattern matched
                break

    def getDisabledReasons(self):
        res = {}
        if self.css and cssselect is None:
            res['css'] = (u"This module needs the cssselect " +
                          u"(python-cssselect) python module which is " +
                          u"not installed.")
        if html is None:
            res['lxml'] = (u"This module needs the lxml (python-lxml) " +
                           u"python module which is not installed.")
        return res
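
# A minimal sketch of an XPath-based comic module (class name, URL and
# expressions are hypothetical; real modules live in the dosagelib.plugins
# package):
#
#     class HypotheticalParserComic(_ParserScraper):
#         url = 'http://comic.example.com/'
#         stripUrl = url + 'comic/%s'
#         firstStripUrl = stripUrl % '1'
#         imageSearch = '//div[@id="comic"]//img'
#         prevSearch = '//a[@rel="prev"]'
#         help = 'Index format: number'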


def find_scrapers(comic, multiple_allowed=False):
    """Get a list of comic scraper objects.

    Can return more than one entry if multiple_allowed is True, else it
    raises a ValueError if multiple modules match. The match is a case
    insensitive substring search.
    """
    if not comic:
        raise ValueError("empty comic name")
    candidates = []
    cname = comic.lower()
    for scraperobj in get_scrapers():
        lname = scraperobj.name.lower()
        if lname == cname:
            # perfect match
            if not multiple_allowed:
                return [scraperobj]
            else:
                candidates.append(scraperobj)
        elif cname in lname:
            candidates.append(scraperobj)
    if len(candidates) > 1 and not multiple_allowed:
        comics = ", ".join(x.name for x in candidates)
        raise ValueError('multiple comics found: %s' % comics)
    elif not candidates:
        raise ValueError('comic %r not found' % comic)
    return candidates


_scrapers = None


def get_scrapers():
    """Find all comic scraper classes in the plugins directory.

    The result is cached.
    @return: list of Scraper instances
    @rtype: list of Scraper
    """
    global _scrapers
    if _scrapers is None:
        out.debug(u"Loading comic modules...")
        modules = loader.get_modules('plugins')
        plugins = loader.get_plugins(modules, Scraper)
        _scrapers = sorted([x() for x in plugins], key=lambda p: p.name)
        check_scrapers()
        out.debug(u"... %d modules loaded." % len(_scrapers))
    return _scrapers


def check_scrapers():
    """Check for duplicate scraper names."""
    d = {}
    for scraper in _scrapers:
        name = scraper.name.lower()
        if name in d:
            name1 = scraper.name
            name2 = d[name].name
            raise ValueError('duplicate scrapers %s and %s found' %
                             (name1, name2))
        d[name] = scraper


def make_scraper(classname, scraperType=_BasicScraper, **attributes):
    """Make a new scraper class with given name and attributes."""
    return type(classname, (scraperType,), attributes)
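
# Usage sketch for make_scraper() (the comic name and patterns are purely
# illustrative):
#
#     HypotheticalComic = make_scraper(
#         'HypotheticalComic',
#         url='http://comic.example.com/',
#         imageSearch=re.compile(tagre("img", "src", r'([^"]*/strips/[^"]*)')),
#         prevSearch=re.compile(tagre("a", "href", r'([^"]*)', after="prev")),
#     )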