Add some scriptutil functions.

Bastian Kleineidam 2013-02-13 20:02:47 +01:00
parent b0526243bb
commit d6118c1cbf
2 changed files with 23 additions and 10 deletions

View file

@@ -1,6 +1,7 @@
 # Copyright (C) 2012-2013 Bastian Kleineidam
 import re
 import json
+from dosagelib.util import unescape, unquote, asciify

 def contains_case_insensitive(adict, akey):
     """Check if key is in adict. The search is case insensitive."""
@@ -46,3 +47,20 @@ def load_result(json_file):
 def truncate_name(text):
     """Ensure the comic name does not exceed 100 characters."""
     return text[:100]
+
+
+def format_name(text):
+    """Format a comic name."""
+    name = unescape(text)
+    name = asciify(name.replace('&', 'And').replace('@', 'At'))
+    name = capfirst(name)
+    return name
+
+
+def format_description(text):
+    """Format a comic description."""
+    desc = remove_html_tags(text)
+    desc = unescape(desc)
+    desc = unquote(desc)
+    desc = compact_whitespace(desc).strip()
+    return desc
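
For illustration, a hypothetical usage sketch of the two new helpers. The sample strings are invented, the exact outputs depend on the asciify/capfirst helpers, and running it assumes the interpreter starts in the same directory as scriptutil.py so its dosagelib imports resolve:

# Hypothetical example -- sample inputs invented for illustration.
from scriptutil import format_name, format_description

# unescape entities, map '&' -> 'And' and '@' -> 'At',
# reduce to ASCII, then capitalize the first letter
name = format_name("sticks &amp; stones")

# strip HTML tags, unescape and unquote entities,
# then collapse runs of whitespace
desc = format_description("<p>A   comic about\n rocks.</p>")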

View file

@@ -10,9 +10,9 @@ import os
 import urlparse
 import requests

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from dosagelib.util import getPageContent, asciify, unescape, tagre, unquote
-from dosagelib.scraper import get_scrapers
-from scriptutil import contains_case_insensitive, remove_html_tags, capfirst, compact_whitespace, save_result, load_result, truncate_name
+from dosagelib.util import getPageContent, tagre
+from dosagelib.scraper import get_scraperclasses
+from scriptutil import contains_case_insensitive, save_result, load_result, truncate_name, format_name, format_description

 json_file = __file__.replace(".py", ".json")
@@ -225,9 +225,7 @@ def handle_url(url, session, res):
     for match in page_matcher.finditer(data):
         page_url = match.group(1)
         page_url = urlparse.urljoin(url, page_url)
-        name = unescape(match.group(2))
-        name = asciify(name.replace('&', 'And').replace('@', 'At'))
-        name = capfirst(name)
+        name = format_name(match.group(2))
         if name in exclude_comics:
             continue
         if contains_case_insensitive(res, name):
@@ -259,10 +257,7 @@ def handle_url(url, session, res):
         if not mo:
             print("ERROR matching comic description:", repr(data2[end:end+300]), file=sys.stderr)
             continue
-        desc = remove_html_tags(mo.group(1))
-        desc = unescape(desc)
-        desc = unquote(desc)
-        desc = compact_whitespace(desc).strip()
+        desc = format_description(mo.group(1))
         # search for adult flag
         adult = adult_matcher.search(data2[end:])
         bounce = name not in repeat_comics
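
The call-site changes are intended to be behavior-preserving: handle_url() now delegates to the shared helpers instead of repeating the same sequence inline. A minimal, hypothetical equivalence check (same import assumptions as the sketch above; the sample string is invented):

from dosagelib.util import unescape, asciify
from scriptutil import capfirst, format_name

def old_format_name(text):
    # the three lines removed from handle_url() above
    name = unescape(text)
    name = asciify(name.replace('&', 'And').replace('@', 'At'))
    return capfirst(name)

sample = "foo &amp; bar @ baz"  # invented input
assert format_name(sample) == old_format_name(sample)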