diff --git a/dosagelib/scraper.py b/dosagelib/scraper.py index 04b463664..71878b2d6 100644 --- a/dosagelib/scraper.py +++ b/dosagelib/scraper.py @@ -4,7 +4,7 @@ import requests import time from . import loader -from .util import fetchUrl, fetchUrls, getPageContent, makeList +from .util import fetchUrl, fetchUrls, getPageContent, makeSequence from .comic import ComicStrip from .output import out from .events import getHandler @@ -104,10 +104,10 @@ class _BasicScraper(object): imageUrls = fetchUrls(url, data, baseUrl, self.imageSearch) imageUrls = set(map(self.imageUrlModifier, imageUrls)) if len(imageUrls) > 1 and not self.multipleImagesPerStrip: - patterns = [x.pattern for x in makeList(self.imageSearch)] + patterns = [x.pattern for x in makeSequence(self.imageSearch)] out.warn("found %d images instead of 1 at %s with patterns %s" % (len(imageUrls), url, patterns)) elif not imageUrls: - patterns = [x.pattern for x in makeList(self.imageSearch)] + patterns = [x.pattern for x in makeSequence(self.imageSearch)] out.warn("found no images at %s with patterns %s" % (url, patterns)) return ComicStrip(self.getName(), url, imageUrls, self.namer, self.session) diff --git a/dosagelib/util.py b/dosagelib/util.py index 725d63166..36921c280 100644 --- a/dosagelib/util.py +++ b/dosagelib/util.py @@ -147,18 +147,18 @@ def getImageObject(url, referrer, session, max_content_bytes=MaxImageBytes): return urlopen(url, session, referrer=referrer, max_content_bytes=max_content_bytes) -def makeList(item): -    """If tiem is already a list or tuple, return it. -    Else return a list with item as single element.""" +def makeSequence(item): +    """If item is already a list or tuple, return it.
+ Else return a tuple with item as single element.""" if isinstance(item, (list, tuple)): return item - return [item] + return (item,) def fetchUrls(url, data, baseUrl, urlSearch): """Search all entries for given URL pattern(s) in a HTML page.""" searchUrls = [] - searches = makeList(urlSearch) + searches = makeSequence(urlSearch) for search in searches: for match in search.finditer(data): searchUrl = match.group(1)