# html.py - parser functions for html content
#
# Copyright (C) 2005, 2006, 2007 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.
"""Parser functions for processing HTML content. This module uses
the legacy HTMLParser module. It will only be used if BeatifulSoup
is not available and can be considered depricated. This parser
will only handle properly formatted HTML."""
import debugio
import HTMLParser
import urlparse
import re
import crawler
import myurllib
from parsers.html import htmlunescape
# pattern for matching numeric html entities
_charentitypattern = re.compile('&#([0-9]{1,3});')
# pattern for matching spaces
_spacepattern = re.compile(' ')
# pattern for matching charset declaration for http-equiv tag
_charsetpattern = re.compile('charset=([^ ]*)', re.I)
# pattern for matching the encoding part of an xml declaration
_encodingpattern = re.compile('^xml .*encoding="([^"]*)"', re.I)
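# Some illustrative matches (sample values, not taken from a real document):
# _charsetpattern extracts 'utf-8' from a value such as
# 'text/html; charset=utf-8', and _encodingpattern extracts 'UTF-8' from a
# declaration such as 'xml version="1.0" encoding="UTF-8"'.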
class _MyHTMLParser(HTMLParser.HTMLParser):
"""A simple subclass of HTMLParser.HTMLParser continuing after errors
and gathering some information from the parsed content."""
def __init__(self, link):
"""Inialize the menbers in which we collect data from parsing the
document."""
self.link = link
self.collect = None
self.base = None
self.title = None
self.author = None
self.embedded = []
self.children = []
self.anchors = []
self.errmsg = None
self.errcount = 0
HTMLParser.HTMLParser.__init__(self)
def _location(self):
"""Return the current parser location as a string."""
(lineno, offset) = self.getpos()
if lineno is not None:
msg = 'at line %d' % lineno
else:
msg = 'at unknown line'
if offset is not None:
msg += ', column %d' % (offset + 1)
return msg
def _cleanurl(self, url, what='link'):
"""Do some translations of url."""
# check for spaces in urls
# (characters are escaped in myurllib.normalizeurl())
if _spacepattern.search(url):
self.link.add_pageproblem(
what+' contains unescaped spaces: '+url+', '+self._location() )
# replace &#nnn; entity refs with proper characters
url = _charentitypattern.sub(lambda x:chr(int(x.group(1))), url)
return myurllib.normalizeurl(url)
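# For illustration (made-up values): an href of 'some page.html' would be
# reported as containing unescaped spaces, while '&#47;index&#46;html'
# would be rewritten to '/index.html' before being passed to
# myurllib.normalizeurl().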
def error(self, message):
"""Override superclass' error() method to ignore errors."""
# construct error message
message += ', ' + self._location()
# store error message
debugio.debug('parsers.html.htmlparser._MyHTMLParser.error(): problem parsing html: '+message)
if self.errmsg is None:
self.errmsg = message
# increment error count
self.errcount += 1
if self.errcount > 10:
raise HTMLParser.HTMLParseError(message, self.getpos())
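# The net effect of the error handling above is that the first problems are
# only recorded against the link, while more than ten errors abort parsing
# with HTMLParseError, presumably to be handled by whoever called feed().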
def check_for_whole_start_tag(self, i):
"""Override to catch assertion exception."""
try:
return HTMLParser.HTMLParser.check_for_whole_start_tag(self, i)
except AssertionError:
debugio.debug('parsers.html.htmlparser._MyHTMLParser.check_for_whole_start_tag(): caught assertion error')
return None
def handle_starttag(self, tag, attrs):
"""Handle start tags in html."""
# turn attrs into a dictionary
attrs = dict(attrs)
#