parsers/html/__init__.py

# html.py - parser functions for html content
#
# Copyright (C) 2005, 2006, 2007, 2008, 2011 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.

"""Parser functions for processing HTML content. This a front-end
module that tries to load the BeatifulSoup parser first and falls
back to loading the legacy HTMLParser parser."""

import debugio
import re
import htmlentitydefs
import config

# the list of mimetypes this module should be able to handle
mimetypes = ('text/html', 'application/xhtml+xml', 'text/x-server-parsed-html')

# pattern for matching all html entities
_entitypattern = re.compile('&(#[0-9]{1,6}|[a-zA-Z]{2,10});')
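# Note that the pattern above matches named references such as "&amp;" and
# decimal character references such as "&#233;", but not hexadecimal
# references ("&#xE9;") or entity names that contain digits ("&frac12;"),
# so those are left untouched by htmlunescape().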


def htmlescape(txt):
    """HTML escape the given string and return an ASCII clean string with
    known entities and character entities for the other values."""
    # check for empty string
    if not txt:
        return u''
    # convert to unicode object
    if not isinstance(txt, unicode):
        txt = unicode(txt)
    # the output string
    out = ''
    # loop over the characters of the string
    for c in txt:
        if ord(c) in htmlentitydefs.codepoint2name:
            out += '&%s;' % htmlentitydefs.codepoint2name[ord(c)]
        elif ord(c) > 126:
            out += '&#%d;' % ord(c)
        else:
            out += c.encode('utf-8')
    return out
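
# Example of htmlescape() behaviour (illustrative sketch, assuming a
# unicode input; named entities are used where known, numeric character
# references otherwise):
#
#   >>> htmlescape(u'caf\xe9 & <b>')
#   'caf&eacute; &amp; &lt;b&gt;'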


def _unescape_entity(match):
    """Helper function for htmlunescape().
    This funcion unescapes a html entity, it is passed to the sub()
    function."""
    if match.group(1) in htmlentitydefs.name2codepoint:
        # we have a named entity, return proper character
        return unichr(htmlentitydefs.name2codepoint[match.group(1)])
    elif match.group(1)[0] == '#':
        # we have a numeric entity, replace with proper character
        return unichr(int(match.group(1)[1:]))
    else:
        # we have something else, just keep the original
        return match.group(0)


def htmlunescape(txt):
    """This function unescapes a html encoded string.
    This function returns a unicode string."""
    # check for empty string
    if not txt:
        return u''
    # convert to unicode
    if not isinstance(txt, unicode):
        txt = unicode(txt, errors='replace')
    # replace &name; and &#nn; refs with proper characters
    txt = _entitypattern.sub(_unescape_entity, txt)
    # we're done
    return txt
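
# Example of htmlunescape() behaviour (illustrative sketch; both named and
# decimal character references are resolved to unicode characters):
#
#   >>> htmlunescape('caf&eacute; &amp; &lt;b&gt;')
#   u'caf\xe9 & <b>'
#   >>> htmlunescape('&#233;')
#   u'\xe9'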


def _parsefunction(content, link):
    """Bootstrap parse function: find a suitable parser module, rebind
    the module-level _parsefunction name to its parse function and
    delegate this first call to it."""
    global _parsefunction
    try:
        # try BeautifulSoup parser first
        import parsers.html.beautifulsoup
        debugio.debug('parsers.html.parse(): the BeautifulSoup parser is ok')
        _parsefunction = parsers.html.beautifulsoup.parse
    except ImportError:
        # fall back to legacy HTMLParser parser
        debugio.warn('falling back to the legacy HTML parser, '
                     'consider installing BeautifulSoup')
        import parsers.html.htmlparser
        _parsefunction = parsers.html.htmlparser.parse
    # call the actual parse function
    _parsefunction(content, link)


def parse(content, link):
    """Parse the specified content and extract an url list, a list of images a
    title and an author. The content is assumed to contain HMTL."""
    # call the normal parse function
    _parsefunction(content, link)
    # call the tidy parse function
    if config.TIDY_OPTIONS:
        try:
            import calltidy
            debugio.debug('parsers.html.parse(): the Tidy parser is ok')
            calltidy.parse(content, link)
        except ImportError:
            debugio.warn('tidy library (python-utidylib) is unavailable')
            # clear the option so that we only try this once
            config.TIDY_OPTIONS = None
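

# The _parsefunction() bootstrap above rebinds the module-level name to the
# real parse function on first use, so the parser import check only happens
# once. A minimal standalone sketch of the same idiom (fastmodule and
# slowmodule are hypothetical placeholders, not dependencies of this
# package):
#
#   def _do_work(arg):
#       global _do_work
#       try:
#           import fastmodule
#           _do_work = fastmodule.work
#       except ImportError:
#           import slowmodule
#           _do_work = slowmodule.work
#       return _do_work(arg)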