New feature: Comic modules can be "disabled".

This is modeled in parallel with the "adult" feature, except that the user
can't override it via the command line. Each comic module can override the
classmethod getDisabledReasons to give the user a reason why the module is
disabled. The user sees the reason in the comic list (-l or --singlelist),
and the comic module refuses to run, showing the same message.

This is currently used to disable modules based on the _ParserScraper when
the lxml Python module is missing.
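
For illustration (not part of this commit), a comic module could override the
classmethod roughly like this; the module name and the optional dependency
check are made up, only the {reason-id: explanation} contract comes from the
code below:

    # Hypothetical comic module, for illustration only.
    try:
        import some_optional_lib  # made-up optional dependency
    except ImportError:
        some_optional_lib = None

    from dosagelib.scraper import _BasicScraper

    class HypotheticalComic(_BasicScraper):
        url = 'http://www.example.com/'

        @classmethod
        def getDisabledReasons(cls):
            # Key: short unique identifier; value: message shown to the user.
            res = {}
            if some_optional_lib is None:
                res['somelib'] = u"This module needs the some_optional_lib python module which is not installed."
            return res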
Author: Tobias Gruetzmacher
Date:   2014-10-13 21:39:13 +02:00
Commit: e92a3fb3a1 (parent d495d95ee0)

3 changed files with 63 additions and 32 deletions

dosage (31 changes)

--- a/dosage
+++ b/dosage

@@ -136,7 +136,7 @@ def displayHelp(options):
     """Print help for comic strips."""
     errors = 0
     try:
-        for scraperobj in director.getScrapers(options.comic, options.basepath):
+        for scraperobj in director.getScrapers(options.comic, options.basepath, listing=True):
             errors += displayComicHelp(scraperobj)
     except ValueError as msg:
         out.exception(msg)
@@ -239,12 +239,17 @@ def doList(columnList=True, verbose=False):
     out.info(u'Available comic scrapers:')
     out.info(u'Comics tagged with [%s] require age confirmation with the --adult option.' % TAG_ADULT)
     out.info(u'Non-english comics are tagged with [%s].' % TAG_LANG)
-    scrapers = sorted(director.getAllScrapers(), key=lambda s: s.getName())
+    scrapers = sorted(director.getAllScrapers(listing=True), key=lambda s: s.getName())
     if columnList:
-        num = doColumnList(scrapers)
+        num, disabled = doColumnList(scrapers)
     else:
-        num = doSingleList(scrapers, verbose=verbose)
+        num, disabled = doSingleList(scrapers, verbose=verbose)
     out.info(u'%d supported comics.' % num)
+    if disabled:
+        out.info('')
+        out.info(u'Some comics are disabled, they are tagged with [%s:REASON], where REASON is one of:' % TAG_DISABLED)
+        for k in disabled:
+            out.info(u' %-10s %s' % (k, disabled[k]))
     if page:
         pydoc.pager(fd.getvalue())
     return 0
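
For illustration, the tail of the -l output with one disabled module would
look roughly as follows; the comic name and the count are invented, while the
tag format comes from TAG_DISABLED in the next hunk and the reason text from
_ParserScraper.getDisabledReasons further below:

    HypotheticalComic [dis:lxml]
    ...
    1000 supported comics.

    Some comics are disabled, they are tagged with [dis:REASON], where REASON is one of:
     lxml       This module needs the lxml (python-lxml) python module which is not installed.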
@@ -254,38 +259,46 @@ def doList(columnList=True, verbose=False):
 def doSingleList(scrapers, verbose=False):
     """Get list of scraper names, one per line."""
+    disabled = {}
     for num, scraperobj in enumerate(scrapers):
         if verbose:
             displayComicHelp(scraperobj)
         else:
-            out.info(getScraperName(scraperobj))
-    return num
+            out.info(getScraperName(scraperobj, reasons=disabled))
+    return num, disabled
 
 
 def doColumnList(scrapers):
     """Get list of scraper names with multiple names per line."""
+    disabled = {}
     screenWidth = get_columns(sys.stdout)
     # limit name length so at least two columns are there
     limit = (screenWidth // 2) - 8
-    names = [getScraperName(scraperobj, limit=limit) for scraperobj in scrapers]
+    names = [getScraperName(scraperobj, limit=limit, reasons=disabled) for scraperobj in scrapers]
     num = len(names)
     maxlen = max(len(name) for name in names)
     namesPerLine = max(screenWidth // (maxlen + 1), 1)
     while names:
         out.info(u''.join(name.ljust(maxlen) for name in names[:namesPerLine]))
         del names[:namesPerLine]
-    return num
+    return num, disabled
 
 
 TAG_ADULT = "adult"
 TAG_LANG = "lang"
+TAG_DISABLED = "dis"
 
-def getScraperName(scraperobj, limit=None):
+def getScraperName(scraperobj, limit=None, reasons=None):
     """Get comic scraper name."""
     tags = []
     if scraperobj.adult:
         tags.append(TAG_ADULT)
     if scraperobj.lang != "en":
         tags.append("%s:%s" % (TAG_LANG, scraperobj.lang))
+    disabled = scraperobj.getDisabledReasons()
+    if disabled:
+        reasons.update(disabled)
+        for reason in disabled:
+            tags.append("%s:%s" % (TAG_DISABLED, reason))
     if tags:
         suffix = " [" + ", ".join(tags) + "]"
     else:

dosagelib/director.py

--- a/dosagelib/director.py
+++ b/dosagelib/director.py

@@ -189,12 +189,12 @@ def finish():
     out.warn("Waiting for download threads to finish.")
 
 
-def getAllScrapers():
+def getAllScrapers(listing=False):
     """Get all scrapers."""
-    return getScrapers(['@@'])
+    return getScrapers(['@@'], listing=listing)
 
 
-def getScrapers(comics, basepath=None, adult=True, multiple_allowed=False):
+def getScrapers(comics, basepath=None, adult=True, multiple_allowed=False, listing=False):
     """Get scraper objects for the given comics."""
     if '@' in comics:
         # only scrapers whose directory already exists
@@ -203,12 +203,12 @@ def getScrapers(comics, basepath=None, adult=True, multiple_allowed=False):
         for scraperclass in scraper.get_scraperclasses():
             dirname = getDirname(scraperclass.getName())
             if os.path.isdir(os.path.join(basepath, dirname)):
-                if shouldRunScraper(scraperclass, adult):
+                if shouldRunScraper(scraperclass, adult, listing):
                     yield scraperclass()
     elif '@@' in comics:
         # all scrapers
         for scraperclass in scraper.get_scraperclasses():
-            if shouldRunScraper(scraperclass, adult):
+            if shouldRunScraper(scraperclass, adult, listing):
                 yield scraperclass()
     else:
         # get only selected comic scrapers
@@ -229,20 +229,30 @@ def getScrapers(comics, basepath=None, adult=True, multiple_allowed=False):
             indexes = None
         scraperclasses = scraper.find_scraperclasses(name, multiple_allowed=multiple_allowed)
         for scraperclass in scraperclasses:
-            if shouldRunScraper(scraperclass, adult):
+            if shouldRunScraper(scraperclass, adult, listing):
                 scraperobj = scraperclass(indexes=indexes)
                 if scraperobj not in scrapers:
                     scrapers.add(scraperobj)
                     yield scraperobj
 
 
-def shouldRunScraper(scraperclass, adult=True):
+def shouldRunScraper(scraperclass, adult=True, listing=False):
+    if listing:
+        return True
     if not adult and scraperclass.adult:
         warn_adult(scraperclass)
         return False
+    reasons = scraperclass.getDisabledReasons()
+    if reasons:
+        warn_disabled(scraperclass, reasons)
+        return False
     return True
 
 
 def warn_adult(scraperclass):
     """Print warning about adult content."""
     out.warn(u"skipping adult comic %s; use the --adult option to confirm your age" % scraperclass.getName())
+
+def warn_disabled(scraperclass, reasons):
+    """Print warning about disabled comic modules."""
+    out.warn(u"Skipping comic %s: %s" % (scraperclass.getName(), ' '.join(reasons.values())))

dosagelib/scraper.py

--- a/dosagelib/scraper.py
+++ b/dosagelib/scraper.py

@@ -10,6 +10,13 @@ try:
     from urllib.parse import urljoin
 except ImportError:
     from urlparse import urljoin
+
+try:
+    from lxml import html
+    from lxml.html.defs import link_attrs as html_link_attrs
+except ImportError:
+    html = None
+
 from . import loader, configuration, util
 from .util import (getPageContent, makeSequence, get_system_uid, urlopen,
     getDirname, unescape, tagre, normaliseURL)
@@ -308,6 +315,16 @@ class Scraper(object):
     def fetchText(cls, url, data, textSearch, optional):
         raise ValueError("No implementation for fetchText!")
 
+    @classmethod
+    def getDisabledReasons(cls):
+        """
+        Get a dict of reasons why this comic module is disabled. The key is a
+        short (unique) identifier, the value is a string explaining why the
+        module is deactivated. If the module is not disabled, just return an
+        empty dict.
+        """
+        return {}
+
 
 class _BasicScraper(Scraper):
     """
@@ -389,23 +406,8 @@ class _ParserScraper(Scraper):
     of the HTML element and returns that.
     """
 
     @classmethod
-    def xpath(cls, expr):
-        return expr
-
-    @classmethod
-    def css(cls, expr, attr=None):
-        return expr
-
-    @classmethod
     def getPage(cls, url):
-        try:
-            from lxml import html
-        except ImportError:
-            raise ValueError(u"Skipping comic %s: Needs lxml (python-lxml) installed." % cls.getName())
-        from lxml.html.defs import link_attrs
-        cls.link_attrs = link_attrs
-        cls.html = html
         tree = html.document_fromstring(getPageContent(url, cls.session))
         tree.make_links_absolute(url)
         return tree
@@ -418,7 +420,7 @@ class _ParserScraper(Scraper):
         for search in searches:
             for match in data.xpath(search):
                 try:
-                    for attrib in cls.link_attrs:
+                    for attrib in html_link_attrs:
                         if attrib in match.attrib:
                             searchUrls.append(match.get(attrib))
                 except AttributeError:
@@ -450,6 +452,12 @@
         else:
             return None
 
+    @classmethod
+    def getDisabledReasons(cls):
+        res = {}
+        if html is None:
+            res['lxml'] = u"This module needs the lxml (python-lxml) python module which is not installed."
+        return res
 
 def find_scraperclasses(comic, multiple_allowed=False):
     """Get a list comic scraper classes. Can return more than one entries if