Enable warnings and fix some of them
parent be525dbd54
commit fbb3a18c91

7 changed files with 33 additions and 23 deletions
@@ -1,3 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function
+
 SITE_NAME = "dosage"
 SITE_SUB = "a comic downloader and archiver"
 SITE_AUTHOR = "Tobias Gruetzmacher"
@@ -6,7 +9,7 @@ SITE_LINKS = (
     {'name': 'Report Issue', 'icon': 'bug', 'url': 'https://github.com/webcomics/dosage/issues/new'},
 )
 SITE_ICONS = (
-    {'name': 'GitHub', 'icon': 'github', 'url': 'https://github.com/webcomics/dosage/releases/latest'},
+    {'name': 'GitHub', 'icon': 'github', 'url': 'https://github.com/webcomics/dosage'},
 )
 
 THEME = "doc/.theme"
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015-2017 Tobias Gruetzmacher
+# Copyright (C) 2015-2018 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
 
@@ -42,7 +42,7 @@ class AbsurdNotions(_BasicScraper):
     imageSearch = compile(tagre('img', 'src', r'(an[^"]+)'))
     multipleImagesPerStrip = True
     prevSearch = compile(tagre('a', 'href', r'([^"]+)') +
-                         tagre('img', 'src', 'nprev\.gif'))
+                         tagre('img', 'src', r'nprev\.gif'))
     help = 'Index format: n (unpadded)'
 
 
@@ -52,7 +52,7 @@ class AcademyVale(_BasicScraper):
     firstStripUrl = stripUrl % '001'
     imageSearch = compile(tagre('img', 'src', r'(avale\d{4}-\d{2}\.gif)'))
     prevSearch = compile(tagre('a', 'href', r'(avarch[^">]+)', quote="") +
-                         tagre('img', 'src', 'AVNavBack\.gif'))
+                         tagre('img', 'src', r'AVNavBack\.gif'))
     help = 'Index format: nnn'
 
 
@@ -240,6 +240,7 @@ class Annyseed(_ParserScraper):
             return self.stripUrl % '149'
         return tourl
 
+
 class AppleGeeks(_BasicScraper):
     url = 'http://www.applegeeks.com/'
     stripUrl = url + 'comics/viewcomic.php?issue=%s'
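An aside on the r'...' prefixes added in the two hunks above (commentary, not part of the commit): in a normal string literal, \. is an unrecognized escape sequence, and CPython 3.6 and later reports it through the warnings machinery once warnings are enabled, so patterns like 'nprev\.gif' start producing noise; the raw-string form spells the same pattern without relying on the unknown escape being passed through. A minimal, self-contained illustration:

    import warnings

    # Compile two tiny snippets from source so the warning is raised where we can
    # record it; invalid escapes are reported at compile time, not when the regex
    # is later used.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        compile("bad = '\\.'", "<plain literal>", "exec")    # source text: bad = '\.'
        compile("good = r'\\.'", "<raw literal>", "exec")    # source text: good = r'\.'

    for w in caught:
        print(w.category.__name__, w.message)
    # Expected: one entry for the plain literal only, e.g.
    # DeprecationWarning invalid escape sequence \.   (SyntaxWarning on newer CPython)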
@@ -1,14 +1,14 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015-2017 Tobias Gruetzmacher
+# Copyright (C) 2015-2018 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
 
 from re import compile, escape
 
 from ..scraper import _BasicScraper, _ParserScraper
-from ..helpers import bounceStarter, indirectStarter
+from ..helpers import indirectStarter
 from ..util import tagre
 from .common import _TumblrScraper, _WordPressScraper, _WPNavi
 
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015-2017 Tobias Gruetzmacher
+# Copyright (C) 2015-2018 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
 
@@ -72,6 +72,7 @@ class Deathbulge(_BasicScraper):
     imageSearch = compile(r"(/images/comics/[^\.]+\.jpg)")
     prevSearch = compile(r'"previous":(\d+),')
     firstStripUrl = url + '/1'
+
     def getPrevUrl(self, url, data):
         if data[1] == self.url:
             data = (data[0], data[1] + '/')
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015-2016 Tobias Gruetzmacher
+# Copyright (C) 2015-2018 Tobias Gruetzmacher
 
 from __future__ import absolute_import, division, print_function
 
@@ -19,7 +19,12 @@ import traceback
 import time
 import subprocess
 
-from six.moves.html_parser import HTMLParser
+try:
+    import html
+except ImportError:
+    # Python 2.7
+    from HTMLParser import HTMLParser
+    html = HTMLParser()
 from six.moves import range
 import six
 
@@ -203,12 +208,9 @@ def prettyMatcherList(things):
     return "('%s')" % "', '".join(norm)
 
 
-_htmlparser = HTMLParser()
-
-
 def unescape(text):
     """Replace HTML entities and character references."""
-    return _htmlparser.unescape(text)
+    return html.unescape(text)
 
 
 _nopathquote_chars = "-;/=,~*+()@!"
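Some context on the unescape() change above (my gloss, not text from the commit): HTMLParser.unescape() is deprecated in Python 3, so it warns as soon as warnings become visible, while the stdlib html module offers html.unescape() as a plain function; Python 2.7 has no such module, so the fallback binds an HTMLParser instance to the name html, and the single call site html.unescape(text) then works on both interpreters. A standalone sketch of the same shim:

    # Minimal sketch of the compatibility shim; stdlib only, nothing dosage-specific.
    try:
        import html                          # Python 3: html.unescape() is a module-level function
    except ImportError:
        from HTMLParser import HTMLParser    # Python 2.7 fallback
        html = HTMLParser()                  # its unescape() method plays the same role


    def unescape(text):
        """Replace HTML entities and character references."""
        return html.unescape(text)


    if __name__ == "__main__":
        print(unescape("Fish &amp; Chips &lt;on special&gt;"))   # Fish & Chips <on special>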
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
 # Copyright (C) 2012-2014 Bastian Kleineidam
-# Copyright (C) 2015-2017 Tobias Gruetzmacher
+# Copyright (C) 2015-2018 Tobias Gruetzmacher
 '''update languages.py from pycountry'''
 from __future__ import absolute_import, division, print_function
 
@@ -30,12 +30,12 @@ def main():
 
 
 def get_used_languages():
-    lang = {}
+    languages = {}
     for scraperobj in get_scrapers():
-        l = scraperobj.lang
-        if l not in lang:
-            lang[l] = scraperobj.language()
-    return lang
+        lang = scraperobj.lang
+        if lang not in languages:
+            languages[lang] = scraperobj.language()
+    return languages
 
 
 def write_languages(f, l):
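The rename in get_used_languages() reads like a fix for pycodestyle's E741 check (ambiguous variable name), which flags single-character names such as l because they are easy to mistake for 1 or I; lang and languages carry the same information unambiguously. A toy, runnable version with made-up scraper objects (FakeScraper and its data are stand-ins, not dosagelib API):

    class FakeScraper(object):
        """Stand-in for a dosagelib scraper; only the two members this sketch needs."""

        def __init__(self, lang, name):
            self.lang = lang
            self._name = name

        def language(self):
            return self._name


    def get_scrapers():
        # Hypothetical data; the real script iterates the registered scraper modules.
        return [FakeScraper("en", "English"),
                FakeScraper("de", "German"),
                FakeScraper("en", "English")]


    def get_used_languages():
        languages = {}
        for scraperobj in get_scrapers():
            lang = scraperobj.lang
            if lang not in languages:
                languages[lang] = scraperobj.language()
        return languages


    print(get_used_languages())   # {'en': 'English', 'de': 'German'} (insertion order on Python 3.7+)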
tox.ini
@@ -35,3 +35,6 @@ max-line-length = 120
 ignore = E121,E126,E241,FI12,FI14,FI15,FI50,FI51,FI53,FI54,FI55
 require-code = True
 min-version = 2.7
+
+[pytest]
+filterwarnings = default
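This [pytest] section is presumably the "enable warnings" half of the commit: pytest's filterwarnings ini option applies each listed entry like a -W command-line filter, and a bare default stops warnings from being swallowed during test runs, showing each unique warning once; that is what surfaces the deprecated escape sequences and the HTMLParser.unescape() call fixed above. Roughly the same effect in plain Python, outside pytest (a sketch, not part of the commit):

    import warnings

    # Equivalent of running the interpreter with -W default: report every warning
    # category once per unique location instead of hiding the ignored-by-default ones.
    warnings.simplefilter("default")


    def old_style():
        warnings.warn("this helper is going away", DeprecationWarning)


    old_style()   # the DeprecationWarning is now printed to stderr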