dosage/scripts/arcamax.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
"""
Script to get arcamax comics and save the info in a JSON file for further
processing.
"""
from __future__ import absolute_import, division, print_function

import codecs
import sys
import os

import requests
from lxml import html

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))  # noqa
from dosagelib.util import get_page
from dosagelib.scraper import get_scrapers
from scriptutil import (contains_case_insensitive, save_result, load_result,
                        truncate_name, format_name)

json_file = __file__.replace(".py", ".json")

# names of comics to exclude
exclude_comics = [
    "HagartheHorrible",  # better source available
]

def handle_url(url, session, res):
    """Parse one search result page."""
    print("Parsing", url, file=sys.stderr)
    try:
        data = html.document_fromstring(get_page(url, session).text)
        data.make_links_absolute(url)
    except IOError as msg:
        print("ERROR:", msg, file=sys.stderr)
        return
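    # each comic on the overview page is linked through an
    # <a class="comic-icon"> element; its title attribute carries the
    # display name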
    for comiclink in data.cssselect('a.comic-icon'):
        path = comiclink.attrib['href']
        name = format_name(comiclink.attrib['title'])
        if name in exclude_comics:
            continue
        if contains_case_insensitive(res, name):
            # we cannot handle two comics that only differ in case
            print("INFO: skipping possible duplicate", repr(name),
                  file=sys.stderr)
            continue
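        # rsplit('/', 2)[1] picks the second-to-last segment of the absolute
        # href, i.e. the last real path component when the URL ends with a
        # trailing slash; Arcamax uses this as the comic's short name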
        res[name] = path.rsplit('/', 2)[1]
    if not res:
        print("ERROR:", "did not match any comics", file=sys.stderr)


def get_results():
    """Parse all search result pages."""
    # store info in a dictionary {name -> shortname}
    res = {}
    session = requests.Session()
    handle_url('http://www.arcamax.com/comics', session, res)
    save_result(res, json_file)


def find_dups(name):
    """Check if comic name already exists."""
    names = [
        ("Creators/%s" % name).lower(),
        ("DrunkDuck/%s" % name).lower(),
        ("GoComics/%s" % name).lower(),
        ("KeenSpot/%s" % name).lower(),
        ("ComicGenesis/%s" % name).lower(),
        ("SmackJeeves/%s" % name).lower(),
    ]
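    # a comic counts as a duplicate if an existing scraper already provides
    # it under one of the prefixed names above, or under the bare name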
    for scraperobj in get_scrapers():
        lname = scraperobj.name.lower()
        if lname in names or lname == name.lower():
            return scraperobj.name
    return None


def first_lower(x):
    """Sort key: lower-cased first tuple element (the comic name)."""
    return x[0].lower()
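

# print_results emits "class Foo(_Arcamax)" stubs; _Arcamax is the scraper
# base class defined in dosage's plugin modules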
def print_results(args):
    """Write a scraper class stub for each parsed comic, flagging duplicates."""
    # min_comics is unpacked for a consistent command line, but unused here
    min_comics, filename = args
    with codecs.open(filename, 'a', 'utf-8') as fp:
        data = load_result(json_file)
        for name, path in sorted(data.items(), key=first_lower):
            dup = find_dups(name)
            if dup is not None:
                fp.write(u"# %s has a duplicate in %s\n" % (name, dup))
            else:
                fp.write(u"\n\nclass %s(_Arcamax):\n    path = %r\n" % (
                    truncate_name(name), path))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        print_results(sys.argv[1:])
    else:
        get_results()