dosage/scripts/keenspot.py

#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
"""
Script to get a list of KeenSpot comics and save the info in a
JSON file for further processing.
"""
from urllib.parse import urlsplit
from scriptutil import ComicListUpdater
from dosagelib.util import check_robotstxt


class KeenSpotUpdater(ComicListUpdater):
    # Module name templates used to detect comics already covered by
    # another source (Creators, GoComics or ComicGenesis).
    dup_templates = ('Creators/%s', 'GoComics/%s', 'ComicGenesis/%s')

    # names of comics to exclude
    excluded_comics = (
        # non-standard navigation
        'BrawlInTheFamily',
        'Flipside',
        'LastBlood',
        'TheGodChild',
        'Twokinds',
        'Yirmumah',
    )

    # Extra constructor arguments appended to the generated entry for comics
    # that need a non-default archive path or a fixed last strip date.
    extra = {
        'CrowScare': "last='20111031'",
        'Dreamless': "last='20100726'",
        'GeneCatlow': "last='20170412'",
        'MysticRevolution': "path='?cid=%s'",
        'PunchAnPie': "path='daily/%s.html'",
        'ShockwaveDarkside': "path='2d/%s.html'",
    }

    def collect_results(self):
        """Parse the front page."""
        data = self.get_url('http://keenspot.com/')

        for comiclink in data.xpath('//td[@id]/a'):
            comicurl = comiclink.attrib['href']
            name = comiclink.xpath('string()')
            try:
                # Check robots.txt for the daily archive path before adding
                # the comic; skip it if access is denied.
                if '/d/' not in comicurl:
                    check_robotstxt(comicurl + 'd/', self.session)
                else:
                    check_robotstxt(comicurl, self.session)
            except IOError as e:
                print('[%s] INFO: robots.txt denied: %s' % (name, e))
                continue

            self.add_comic(name, comicurl)

    def get_entry(self, name, url):
        # Use the comic's KeenSpot subdomain as the second constructor
        # argument of the generated entry.
        sub = urlsplit(url).hostname.split('.', 1)[0]
        if name in self.extra:
            extra = ', ' + self.extra[name]
        else:
            extra = ''
        return u"cls('%s', '%s'%s)," % (name, sub, extra)


if __name__ == '__main__':
    KeenSpotUpdater(__file__).run()