#!/usr/bin/env python
# Copyright (C) 2013-2014 Bastian Kleineidam
# Copyright (C) 2016 Tobias Gruetzmacher
"""
Script to get ComicFury comics and save the info in a JSON file for further
processing.
"""
from __future__ import print_function, absolute_import

import codecs
import sys
import os

import requests
from lxml import html

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))  # noqa
from dosagelib.util import getPageContent
from dosagelib.scraper import get_scraperclasses
from scriptutil import (contains_case_insensitive, save_result, load_result,
                        truncate_name, format_name)

# Absolute minimum number of pages a comic may have (restrict search space)
MIN_COMICS = 90

json_file = __file__.replace(".py", ".json")

# names of comics to exclude
exclude_comics = [
|
2016-03-16 23:44:06 +00:00
|
|
|
# unsuitable navigation
|
|
|
|
"AlfdisAndGunnora",
|
|
|
|
"AnAmericanNerdinAnimatedTokyo",
|
|
|
|
"AngryAlien",
|
|
|
|
"BoozerAndStoner",
|
|
|
|
"Bonejangles",
|
|
|
|
"ConradStory",
|
|
|
|
"Crossing",
|
|
|
|
"ChristianHumberReloaded",
|
|
|
|
"CorkandBlotto",
|
|
|
|
"Democomix",
|
|
|
|
"ErraticBeatComics",
|
|
|
|
"EnergyWielders",
|
|
|
|
"EvilBearorg",
|
|
|
|
"Fiascos",
|
|
|
|
"FateoftheBlueStar",
|
|
|
|
"FPK",
|
|
|
|
"Fanartgyle",
|
|
|
|
"FrigginRandom",
|
|
|
|
"GoodbyeKitty",
|
|
|
|
"HighlyExperiMental",
|
|
|
|
"IfAndCanBeFlowers",
|
|
|
|
"JournalismStory",
|
|
|
|
"JohnsonSuperior",
|
|
|
|
"Keel",
|
|
|
|
"JudgeDredBasset",
|
|
|
|
"LomeathAndHuilii",
|
|
|
|
"MNPB",
|
|
|
|
"LucidsDream",
|
|
|
|
"MadDog",
|
|
|
|
"Minebreakers",
|
|
|
|
"Moonlightvalley",
|
|
|
|
"MyImmortalFool",
|
|
|
|
"NATO",
|
|
|
|
"NothingFits",
|
|
|
|
"OptimisticFishermenandPessimisticFishermen",
|
|
|
|
"Old2G",
|
|
|
|
"NothingFitsArtBlog",
|
|
|
|
"OutToLunchTheStingRayWhoreStory",
|
|
|
|
"Pandemonium",
|
|
|
|
"Pewfell",
|
|
|
|
"ProjectX",
|
|
|
|
"Ratantia",
|
|
|
|
"RealLifeTrips",
|
|
|
|
"Sandgate",
|
|
|
|
"Secondpuberty",
|
|
|
|
"Seconds",
|
|
|
|
"SlightlyEccentricOrigins",
|
|
|
|
"StardusttheCat",
|
|
|
|
"StrangerthanFiction",
|
|
|
|
"TalamakGreatAdventure",
|
|
|
|
"TheBattalion",
|
|
|
|
"TheDailyProblem",
|
|
|
|
"TheMansionofE",
|
|
|
|
"ThePainter",
|
|
|
|
"TheSeekers",
|
|
|
|
"TheTrialsofKlahadoftheAbyss",
|
|
|
|
"TheStickmen",
|
|
|
|
"ThornsInOurSide",
|
|
|
|
"TopHeavyVeryBustyPinUpsForAdults",
|
|
|
|
"USBUnlimitedsimulatedbody",
|
|
|
|
"TylerHumanRecycler",
|
|
|
|
"UAF",
|
|
|
|
"WhenPigsFly",
|
|
|
|
"YeOldeLegotimeTheatre",
|
|
|
|
|
|
|
|
# no content
|
|
|
|
"Angst",
|
|
|
|
|
|
|
|
# images gone
|
|
|
|
"BaseballCapsandTiaras",
|
|
|
|
"CROSSWORLDSNEXUS",
|
|
|
|
"Fathead",
|
|
|
|
"KevinZombie",
|
|
|
|
"KindergardenCrisIs",
|
|
|
|
"NoSongsForTheDead",
|
|
|
|
"RequiemShadowbornPariah",
|
|
|
|
"TezzleandZeek",
|
|
|
|
|
|
|
|
# broken HTML
|
|
|
|
"CrossingOver",
|
|
|
|
|
|
|
|
# unique html
|
|
|
|
"IKilledtheHero",
|
|
|
|
"PowerofPower",
|
|
|
|
"Schizmatic",
|
|
|
|
"WaketheSleepers",
|
|
|
|
"WeightofEternity",
|
2013-02-13 16:53:36 +00:00
|
|
|
]


def handle_url(url, session, res):
    """Parse one search result page."""
    print("Parsing", url, file=sys.stderr)
    try:
        data = html.document_fromstring(getPageContent(url, session))
        data.make_links_absolute(url)
    except IOError as msg:
        print("ERROR:", msg, file=sys.stderr)
        return

    num = 999
    for comicdiv in data.cssselect('div.searchresult'):
        comiclink = comicdiv.cssselect('h3 a')[0]
        comicurl = comiclink.attrib['href']
        name = format_name(comiclink.text)
        if contains_case_insensitive(res, name):
            # we cannot handle two comics that only differ in case
            print("INFO: skipping possible duplicate", repr(name),
                  file=sys.stderr)
            continue

        info = comicdiv.cssselect('span.comicinfo')
        # find out how many images this comic has
        num = int(info[1].text.strip())
        # find activity
        active = info[6].text.strip().lower() == "active"
        lang = info[7].text.strip().lower()
        res[name] = [comicurl, num, active, lang]

    return num


def get_results():
    """Parse all search result pages."""
    # store info in a dictionary {name -> [url, number of strips, active, language]}
    res = {}
    session = requests.Session()
    # Sort by page count, so we can abort when we get under some threshold.
    baseUrl = ('http://comicfury.com/search.php?search=1&webcomics=1&query=' +
               '&worder=1&asc=0&incvi=1&incse=1&incnu=1&incla=1&all_ge=1' +
               '&all_st=1&all_la=1&page=%d')
    last_count = 999
    page = 1
    print("Parsing search result pages...", file=sys.stderr)
    while last_count >= MIN_COMICS:
        last_count = handle_url(baseUrl % page, session, res)
        page += 1
        print(last_count, file=sys.stderr, end=" ")
    save_result(res, json_file)
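

# The JSON file written above maps each formatted comic name to a list of
# [url, number of strips, active flag, language]. An illustrative entry
# (name, URL and count are made up, not real crawl output):
#
#     "SomeComic": ["http://somecomic.thecomicseries.com/", 245, true, "english"]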


def find_dups(name):
    """Check if comic name already exists."""
    names = [
        ("Creators/%s" % name).lower(),
        ("DrunkDuck/%s" % name).lower(),
        ("GoComics/%s" % name).lower(),
        ("KeenSpot/%s" % name).lower(),
        ("SmackJeeves/%s" % name).lower(),
        ("Arcamax/%s" % name).lower(),
    ]
    for scraperclass in get_scraperclasses():
        lname = scraperclass.getName().lower()
        if lname in names:
            return scraperclass.getName().lower()
    return None


def first_lower(x):
    """Sort key: the lower-cased comic name (first element of an item tuple)."""
    return x[0].lower()


def print_results(args):
    """Write class stubs for all comics that have at least the given minimum
    number of comic strips."""
    min_comics, filename = args
    min_comics = int(min_comics)
    with codecs.open(filename, 'a', 'utf-8') as fp:
        data = load_result(json_file)
        for name, entry in sorted(data.items(), key=first_lower):
            url, num, active, lang = entry
            if name in exclude_comics:
                fp.write(u"# %s is excluded\n" % name)
                continue
            if num < min_comics:
                continue
            dup = find_dups(name)
            if dup is not None:
                fp.write(u"# %s has a duplicate in %s\n" % (name, dup))
            else:
                fp.write(u"class CF%s(_ComicFury):\n    url = %r\n\n\n" % (
                    truncate_name(name), str(url)))
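

# For illustration, an accepted comic is appended to the output file roughly
# like this (the comic name and URL are made up):
#
#     class CFSomeComic(_ComicFury):
#         url = 'http://somecomic.thecomicseries.com/'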


if __name__ == '__main__':
    if len(sys.argv) > 1:
        print_results(sys.argv[1:])
    else:
        get_results()