# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
import requests
import time
import random
import os
from . import loader, configuration, util
from .util import (makeSequence, get_system_uid, urlopen, getDirname)
from .comic import ComicStrip
from .output import out
from .events import getHandler


class Genre:
    """Genre of a comic strip."""
    adventure = u"Adventure"
    crazy = u"Crazy"
    drama = u"Drama"
    fantasy = u"Fantasy"
    gaming = u"Gaming"
    humor = u"Humor"
    reallife = u"Real life"
    scifi = u"Sci-fi"
    other = u"Other"


class Scraper(object):
    """Base class for all comic scrapers, but without a specific scrape
    implementation."""

    # The URL for the comic strip
    url = None

    # A string that is interpolated with the strip index to yield the URL
    # for a particular strip.
    stripUrl = None

    # Stop search for previous URLs at this URL
    firstStripUrl = None

    # if more than one image per URL is expected
    multipleImagesPerStrip = False

    # set to False if previous URLs do not match the strip URL
    # (i.e. because of redirects)
    prevUrlMatchesStripUrl = True

    # set to True if this comic contains adult content
    adult = False

    # set to True if this comic will not get updated anymore
    endOfLife = False

    # a description of the comic contents
    description = u''

    # language of the comic (two-letter ISO 639-1 code)
    lang = 'en'

    # list of genres for this comic strip
    genres = (Genre.other,)

    # an expression that will locate the URL for the previous strip in a page
    # this can also be a list or tuple
    prevSearch = None

    # an expression that will locate the strip image URLs in a page
    # this can also be a list or tuple
    imageSearch = None

    # an expression to store a text together with the image;
    # sometimes comic strips have additional text info for each comic
    textSearch = None

    # Is the additional text required or optional? When it is required (the
    # default), you see an error message whenever a comic page is encountered
    # that does not have the text.
    textOptional = False

    # usually the index format help
    help = ''

    # HTTP session storing cookies
    session = requests.session()

    def __init__(self, indexes=None):
        """Initialize internal variables."""
        self.urls = set()
        if indexes:
            self.indexes = tuple(sorted(indexes))
        else:
            self.indexes = tuple()
        self.skippedUrls = set()
        self.hitFirstStripUrl = False

    def __cmp__(self, other):
        """Compare scraper by name and index list."""
        if not isinstance(other, Scraper):
            return 1
        # first, order by name
        d = cmp(self.getName(), other.getName())
        if d != 0:
            return d
        # then by indexes
        return cmp(self.indexes, other.indexes)

    def __hash__(self):
        """Get hash value from name and index list."""
        return hash((self.getName(), self.indexes))

    def shouldSkipUrl(self, url, data):
        """Determine if search for images in given URL should be skipped."""
        return False

    def getComicStrip(self, url, data):
        """Get comic strip downloader for given URL and data."""
        imageUrls = self.fetchUrls(url, data, self.imageSearch)
        # apply the modifier function to each image URL
        imageUrls = [self.imageUrlModifier(x, data) for x in imageUrls]
        # remove duplicate URLs
        imageUrls = set(imageUrls)
        if len(imageUrls) > 1 and not self.multipleImagesPerStrip:
            patterns = [x.pattern for x in makeSequence(self.imageSearch)]
            out.warn(u"Found %d images instead of 1 at %s with expressions %s" % (len(imageUrls), url, patterns))
            image = sorted(imageUrls)[0]
            out.warn(u"Choosing image %s" % image)
            imageUrls = (image,)
        elif not imageUrls:
            patterns = [x.pattern for x in makeSequence(self.imageSearch)]
            out.warn(u"Found no images at %s with expressions %s" % (url, patterns))
        if self.textSearch:
            text = self.fetchText(url, data, self.textSearch, optional=self.textOptional)
        else:
            text = None
        return ComicStrip(self.getName(), url, imageUrls, self.namer, self.session, text=text)

    def getStrips(self, maxstrips=None):
        """Get comic strips."""
        if maxstrips:
            word = u"strip" if maxstrips == 1 else u"strips"
            msg = u'Retrieving %d %s' % (maxstrips, word)
        else:
            msg = u'Retrieving all strips'
        if self.indexes:
            if len(self.indexes) == 1:
                msg += u" for index %s" % self.indexes[0]
            else:
                msg += u" for indexes %s" % (self.indexes,)
            # Always call starter() since it might initialize cookies.
            # See for example the Oglaf comic.
            self.starter()
            urls = [self.getIndexStripUrl(index) for index in self.indexes]
        else:
            urls = [self.getLatestUrl()]
        if self.adult:
            msg += u" (including adult content)"
        out.info(msg)
        for url in urls:
            for strip in self.getStripsFor(url, maxstrips):
                yield strip

    def getStripsFor(self, url, maxstrips):
        """Get comic strips for a URL. If maxstrips is a positive number,
        stop after retrieving the given number of strips."""
        self.hitFirstStripUrl = False
        seen_urls = set()
        while url:
            out.info(u'Get strip URL %s' % url, level=1)
            data = self.getPage(url)
            if self.shouldSkipUrl(url, data):
                out.info(u'Skipping URL %s' % url)
                self.skippedUrls.add(url)
            else:
                try:
                    yield self.getComicStrip(url, data)
                except ValueError as msg:
                    # image not found
                    out.exception(msg)
            if self.firstStripUrl == url:
                out.debug(u"Stop at first URL %s" % url)
                self.hitFirstStripUrl = True
                break
            if maxstrips is not None:
                maxstrips -= 1
                if maxstrips <= 0:
                    break
            prevUrl = self.getPrevUrl(url, data)
            seen_urls.add(url)
            if prevUrl in seen_urls:
                # avoid recursive URL loops
                out.warn(u"Already seen previous URL %r" % prevUrl)
                break
            url = prevUrl
            if url:
                # wait 1 to 2 seconds before fetching the next URL
                time.sleep(1.0 + random.random())

    def getPrevUrl(self, url, data):
        """Find previous URL."""
        prevUrl = None
        if self.prevSearch:
            try:
                prevUrl = self.fetchUrl(url, data, self.prevSearch)
            except ValueError as msg:
                # assume there is no previous URL, but print a warning
                out.warn(u"%s Assuming no previous comic strips exist." % msg)
            else:
                prevUrl = self.prevUrlModifier(prevUrl)
                out.debug(u"Found previous URL %s" % prevUrl)
                getHandler().comicPageLink(self.getName(), url, prevUrl)
        return prevUrl

    def getIndexStripUrl(self, index):
        """Get comic strip URL from index."""
        return self.stripUrl % index

    @classmethod
    def getName(cls):
        """Get scraper name."""
        if hasattr(cls, 'name'):
            return cls.name
        return cls.__name__

    @classmethod
    def starter(cls):
        """Get starter URL from where to scrape comic strips."""
        return cls.url

    @classmethod
    def namer(cls, imageUrl, pageUrl):
        """Return filename for given image and page URL."""
        return None

    @classmethod
    def prevUrlModifier(cls, prevUrl):
        """Optional modification of parsed previous URLs. Useful if
        there are domain redirects. The default implementation does
        not modify the URL.
        """
        return prevUrl

    @classmethod
    def imageUrlModifier(cls, imageUrl, data):
        """Optional modification of parsed image URLs. Useful if the URL
        needs to be fixed before usage. The default implementation does
        not modify the URL. The given data is the URL page data.
        """
        return imageUrl

    def getLatestUrl(self):
        """Get starter URL from where to scrape comic strips."""
        return self.starter()

    @classmethod
    def vote(cls):
        """Cast a public vote for this comic."""
        url = configuration.VoteUrl + 'count/'
        uid = get_system_uid()
        data = {"name": cls.getName().replace('/', '_'), "uid": uid}
        page = urlopen(url, cls.session, data=data)
        return page.text

    def getCompleteFile(self, basepath):
        """Get filename indicating all comics are downloaded."""
        dirname = getDirname(self.getName())
        return os.path.join(basepath, dirname, "complete.txt")

    def isComplete(self, basepath):
        """Check if all comics are downloaded."""
        return os.path.isfile(self.getCompleteFile(basepath))

    def setComplete(self, basepath):
        """Set complete flag for this comic, i.e. all comics are downloaded."""
        if self.endOfLife:
            filename = self.getCompleteFile(basepath)
            if not os.path.exists(filename):
                with open(filename, 'w') as f:
                    f.write('All comics should be downloaded here.')

    @classmethod
    def getPage(cls, url):
        """Fetch a page and return an opaque representation of the data,
        suitable for the data parameter of fetchUrls and fetchText.

        Implementation notes: While this base class does not restrict how
        the returned data is structured, subclasses (specific scrapers)
        should specify how this data works, since the structure is passed
        into different methods that comic modules can override, and these
        methods must be able to make use of the data.
        (Affected methods: shouldSkipUrl, imageUrlModifier)
        """
        raise ValueError("No implementation for getPage!")

    @classmethod
    def fetchUrls(cls, url, data, urlSearch):
        """Search the page data for URLs matching the given pattern(s)."""
        raise ValueError("No implementation for fetchUrls!")

    @classmethod
    def fetchUrl(cls, url, data, urlSearch):
        """Search the page data for the first URL matching the given pattern."""
        return cls.fetchUrls(url, data, urlSearch)[0]

    @classmethod
    def fetchText(cls, url, data, textSearch, optional):
        """Search the page data for text matching the given pattern."""
        raise ValueError("No implementation for fetchText!")
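# Usage sketch (hypothetical, not part of the module API): a caller would
# normally look up a scraper class by name and iterate over its strips,
# walking backwards through the comic via the prevSearch links.
# 'SomeComic' is an invented name; getStrips() yields ComicStrip objects
# (see comic.py) which handle the actual image downloads.
#
#     scraperclass = find_scraperclasses('SomeComic')[0]
#     scraper = scraperclass()
#     for strip in scraper.getStrips(maxstrips=3):
#         ...  # download the strip via the ComicStrip API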
class _BasicScraper(Scraper):
    """
    Scraper base class that matches regular expressions against HTML pages.

    Subclasses of this scraper should use compiled regular expressions as
    values for prevSearch, imageSearch and textSearch.

    Implementation note: The return value of getPage is a tuple: the first
    element is the raw HTML page text, the second element is the base URL
    (if any).
    """

    @classmethod
    def getPage(cls, url):
        content, baseUrl = util.getPageContent(url, cls.session)
        return (content, baseUrl)

    @classmethod
    def fetchUrls(cls, url, data, urlSearch):
        """Search all entries for given URL pattern(s) in an HTML page."""
        return util.fetchUrls(url, data[0], data[1], urlSearch)

    @classmethod
    def fetchText(cls, url, data, textSearch, optional):
        """Search text entry for given text pattern in an HTML page."""
        return util.fetchText(url, data[0], textSearch, optional)
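# A minimal sketch of a comic module built on _BasicScraper. Everything here
# (class name, URLs, expressions) is invented for illustration; real comic
# modules live in the plugins directory and are picked up by the loader.
import re  # only needed for the illustrative expressions below


class _ExampleComic(_BasicScraper):
    """Illustrative scraper for a hypothetical comic site."""
    url = 'http://comic.example.com/'
    stripUrl = url + 'strips/%s.html'
    firstStripUrl = stripUrl % '1'
    # compiled regular expressions, as _BasicScraper requires
    imageSearch = re.compile(r'<img src="(http://comic\.example\.com/images/[^"]+)"')
    prevSearch = re.compile(r'<a href="([^"]+)">Previous</a>')
    help = 'Index format: number'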
def find_scraperclasses(comic, multiple_allowed=False):
    """Get a list of comic scraper classes. Can return more than one entry
    if multiple_allowed is True, else it raises a ValueError if multiple
    modules match. The match is a case-insensitive substring search."""
    if not comic:
        raise ValueError("empty comic name")
    candidates = []
    cname = comic.lower()
    for scraperclass in get_scraperclasses():
        lname = scraperclass.getName().lower()
        if lname == cname:
            # perfect match
            if not multiple_allowed:
                return [scraperclass]
            else:
                candidates.append(scraperclass)
        elif cname in lname:
            candidates.append(scraperclass)
    if len(candidates) > 1 and not multiple_allowed:
        comics = ", ".join(x.getName() for x in candidates)
        raise ValueError('multiple comics found: %s' % comics)
    elif not candidates:
        raise ValueError('comic %r not found' % comic)
    return candidates
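# Matching sketch with invented names: if scrapers 'CalvinAndHobbes' and
# 'CatenaManor' are loaded, find_scraperclasses('calvinandhobbes') returns
# exactly one class (exact, case-insensitive match), while
# find_scraperclasses('ca', multiple_allowed=True) returns both candidates
# and find_scraperclasses('ca') raises a ValueError listing them.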
_scraperclasses = None


def get_scraperclasses():
    """Find all comic scraper classes in the plugins directory.
    The result is cached.
    @return: list of Scraper classes
    @rtype: list of Scraper
    """
    global _scraperclasses
    if _scraperclasses is None:
        out.debug(u"Loading comic modules...")
        modules = loader.get_modules('plugins')
        plugins = loader.get_plugins(modules, Scraper)
        _scraperclasses = list(plugins)
        check_scrapers()
        out.debug(u"... %d modules loaded." % len(_scraperclasses))
    return _scraperclasses


def check_scrapers():
    """Check for duplicate scraper class names."""
    d = {}
    for scraperclass in _scraperclasses:
        name = scraperclass.getName().lower()
        if name in d:
            name1 = scraperclass.getName()
            name2 = d[name].getName()
            raise ValueError('duplicate scrapers %s and %s found' % (name1, name2))
        d[name] = scraperclass


def make_scraper(classname, **attributes):
    """Make a new scraper class with given name and attributes."""
    return type(classname, (_BasicScraper,), attributes)
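# Illustrative sketch only: make_scraper builds a scraper class at runtime,
# which is handy for generating many similar scrapers (e.g. comics sharing
# one hosting site). All names and expressions below are invented.
import re  # a no-op if the _ExampleComic sketch above already imported it

_GeneratedExample = make_scraper('GeneratedExample',
    url='http://host.example.com/generated/',
    stripUrl='http://host.example.com/generated/%s/',
    imageSearch=re.compile(r'<img src="(/img/[^"]+)"'),
    prevSearch=re.compile(r'<a href="([^"]+)" rel="prev">'),
    help='Index format: number',
)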