# crawler.py - definition of Link class for storing the crawled site
#
# Copyright (C) 1998, 1999 Albert Hopkins (marduk)
# Copyright (C) 2002 Mike W. Meyer
# Copyright (C) 2005, 2006 Arthur de Jong
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
#
# The files produced as output from the software do not automatically fall
# under the copyright of the software, unless explicitly stated otherwise.

"""General module to do site-checking. This module contains the Site class
containing the state for the crawled site and some functions to access and
manipulate the crawling of the website. This module also contains the Link
class that holds all the link related properties."""

import config
import debugio
import urlparse
import urllib
import robotparser
import schemes
import parsers
import re
import time

# pattern for matching spaces
_spacepattern = re.compile(" ")

# pattern for matching url encoded characters
_urlencpattern = re.compile('(%[0-9a-fA-F]{2})', re.IGNORECASE)

# characters that should not be escaped in urls
_reservedurlchars = ';/?:@&=+$,%#'
_okurlchars = '-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~'

def _urlclean(url):
    """Clean the url of uneccesary parts."""
    # url decode any printable normal characters except reserved characters with special meanings in urls
    for c in _urlencpattern.findall(url):
        r = chr(int(c[1:3],16))
        if r in _okurlchars:
            url = url.replace(c, r)
    # url encode any nonprintable or problematic characters (but not reserved chars)
    url = ''.join(map(lambda x: (x not in _reservedurlchars and x not in _okurlchars) and ('%%%02X' % ord(x)) or x, url))
    # split the url in useful parts (discarding fragment)
    (scheme, netloc, path, query) = urlparse.urlsplit(url)[0:4]
    if ( scheme == "http" or scheme == "https" or scheme == "ftp" ):
        # http(s) urls should have a non-empty path
        if path == "":
            path="/"
        # make hostname lower case
        (userpass, hostport) = urllib.splituser(netloc)
        netloc=hostport.lower()
        # trim trailing :
        if netloc[-1:] == ":":
            netloc = netloc[:-1]
        if userpass is not None:
            netloc = userpass+"@"+netloc
    # put the url back together again
    return urlparse.urlunsplit((scheme, netloc, path, query, ""))

class Site:
    """Class to represent gathered data of a site.

    The available properties of this class are:

      linkMap    - a map of urls to link objects
      base       - a url that points to the base of the site
   """

    def __init__(self):
        """Creates an instance of the Site class and initializes the
        state of the site."""
        # list of internal urls
        self._internal_urls = []
        # list of regexps considered internal
        self._internal_res = []
        # list of regexps considered external
        self._external_res = []
        # list of regexps matching links that should not be checked
        self._yanked_res = []
        # map of scheme+netloc to robot parsers
        self._robotparsers = {}
        # a map of urls to Link objects
        self.linkMap = {}

    def add_internal(self,url):
        """Add the given url and consider all urls below it to be internal.
        These links are all marked for checking with the crawl() function."""
        url=_urlclean(url)
        if url not in self._internal_urls:
            self._internal_urls.append(url)

    def add_internal_re(self,exp):
        """Adds the gived regular expression as a pattern to match internal
        urls."""
        self._internal_res.append(re.compile(exp,re.IGNORECASE))

    def add_external_re(self,exp):
        """Adds the gived regular expression as a pattern to match external
        urls."""
        self._external_res.append(re.compile(exp,re.IGNORECASE))

    def add_yanked_re(self,exp):
        """Adds the gived regular expression as a pattern to match urls that
        will not be checked at all."""
        self._yanked_res.append(re.compile(exp,re.IGNORECASE))

    def _is_internal(self,link):
        """Check whether the specified url is external or internal.
        This uses the urls marked with add_internal() and the regular
        expressions passed with add_external_re()."""
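        # illustrative example (urls assumed): after add_internal('http://www.example.org/')
        # a url such as 'http://www.example.org/docs/x.html' is internal
        # (prefix match when BASE_URLS_ONLY is set, netloc match otherwise),
        # while 'http://other.example.net/' is not, unless it matches a
        # pattern added with add_internal_re()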
        # check if it is internal through the regexps
        for x in self._internal_res:
            if x.search(link.url) is not None:
                return True
        res = False
        # check that the url starts with an internal url
        if config.BASE_URLS_ONLY:
            # the url must start with one of the _internal_urls
            for i in self._internal_urls:
                res |= (i==link.url[:len(i)])
        else:
            # the netloc must match a netloc of an _internal_url
            for i in self._internal_urls:
                res |= (urlparse.urlsplit(i)[1]==link.netloc)
        # if it is not internal now, it never will be
        if not res:
            return False
        # check if it is external through the regexps
        for x in self._external_res:
            # if the url matches it is external and we can stop
            if x.search(link.url) is not None:
                return False
        return True

    def _get_robotparser(self,link):
        """Return the proper robots parser for the given url or None if one
        cannot be constructed. Robot parsers are cached per scheme and
        netloc."""
        # only some schemes have a meaningful robots.txt file
        if link.scheme != "http" and link.scheme != "https":
            debugio.debug("crawler._get_robotparser() called with unsupported scheme (%s)" % link.scheme)
            return None
        # split out the key part of the url
        location = urlparse.urlunsplit((link.scheme, link.netloc, "", "", ""))
        # try to create a new robotparser if we don't already have one
        if not self._robotparsers.has_key(location):
            import httplib
            debugio.info("  getting robots.txt for %s" % location)
            self._robotparsers[location] = None
            try:
                rp=robotparser.RobotFileParser()
                rp.set_url(urlparse.urlunsplit((link.scheme, link.netloc, "/robots.txt", "", "")))
                rp.read()
                self._robotparsers[location] = rp
            except (TypeError, IOError, httplib.HTTPException):
                pass
        return self._robotparsers[location]

    def _is_yanked(self,link):
        """Check whether the specified url should not be checked at all.
        This uses the regular expressions passed with add_yanked_re() and the
        robots information present. Returns False if the link should be
        checked or a string describing why it is skipped."""
        # check if it is yanked through the regexps
        for x in self._yanked_res:
            # if the url matches it is yanked and we can stop
            if x.search(link.url) is not None:
                return "yanked"
        # check if we should avoid external links
        if not link.isinternal and config.AVOID_EXTERNAL_LINKS:
            return "external avoided"
        # skip schemes not having robots.txt files
        if link.scheme != "http" and link.scheme != "https":
            return False
        # skip robot checks for external urls
        # TODO: make this configurable
        if not link.isinternal:
            return False
        # check robots for remaining links
        rp = self._get_robotparser(link)
        if rp is not None and not rp.can_fetch('webcheck',link.url):
            return "robot restriced"
        # fall back to allowing the url
        return False

    def _get_link(self,url):
        """Return a link object for the given url.
        This function checks the map of cached link objects for an
        instance."""
        # clean the url
        url = _urlclean(url)
        # check if we have an object ready
        if self.linkMap.has_key(url):
            return self.linkMap[url]
        # create a new instance
        return Link(self,url)

    def crawl(self):
        """Crawl the website based on the urls specified with
        add_internal()."""
        # TODO: have some different scheme to crawl a site (e.g. separate
        #       internal and external queues, threading, etc)
        tocheck = []
        for u in self._internal_urls:
            tocheck.append(self._get_link(u))
        # repeat until we have nothing more to check
        while len(tocheck) > 0:
            debugio.debug("crawler.crawl(): items left to check: %d" % len(tocheck))
            # choose a link from the tocheck list
            link=tocheck.pop(0)
            # skip the link if there is nothing to check
            if link.isyanked or link.isfetched:
                continue
            # fetch the link's contents
            link.fetch()
            # add children to tocheck
            for child in link.children:
                if not child.isyanked and not child.isfetched and not child in tocheck:
                    tocheck.append(child)
            # add embedded content
            for embed in link.embedded:
                if not embed.isyanked and not embed.isfetched and not embed in tocheck:
                    tocheck.append(embed)
            # sleep between requests if configured
            if config.WAIT_BETWEEN_REQUESTS > 0:
                debugio.debug('sleeping %s seconds' %  config.WAIT_BETWEEN_REQUESTS)
                time.sleep(config.WAIT_BETWEEN_REQUESTS)
        # build the list of urls that were set up with add_internal() that
        # do not have a parent (they form the base for the site)
        bases = [ ]
        for u in self._internal_urls:
            l = self.linkMap[u].follow_link()
            if l is None:
                debugio.warn('base link %s redirects to nowhere' % u)
                continue
            # add the link to the bases if it has no parents or if it is the first one
            if len(l.parents) == 0 or len(bases) == 0:
                debugio.debug('crawler.crawl(): adding %s to bases' % l.url)
                bases.append(l)
        # if we got no bases, just use the first internal one
        if len(bases) == 0:
            debugio.debug('crawler.crawl(): fallback to adding %s to bases' % self._internal_urls[0])
            bases.append(self.linkMap[self._internal_urls[0]])
        # do a breadth first traversal of the website to determine depth and
        # figure out page children
        tocheck = []
        for link in bases:
            link.depth = 0
            tocheck.append(link)
        # repeat until we have nothing more to check
        while len(tocheck) > 0:
            debugio.debug("crawler.crawl(): items left to examine: %d" % len(tocheck))
            # choose a link from the tocheck list
            link = tocheck.pop(0)
            # figure out page children
            for child in link._pagechildren():
                # skip children already in our list or the wrong depth
                if child in tocheck or child.depth != link.depth+1:
                    continue
                tocheck.append(child)
        # set some compatibility properties
        # TODO: figure out a better way to get this to the plugins
        self.base = bases[0].url

class Link:
    """This is a basic class representing a url.

    Some basic information about a url is stored in instances of this
    class:

      url        - the url this link represents
      scheme     - the scheme part of the url
      netloc     - the netloc part of the url
      path       - the path part of the url
      query      - the query part of the url
      parents    - list of parent links (all the Links that link to this
                   page)
      children   - list of child links (the Links that this page links to)
      pagechildren - list of child pages, including children of embedded
                     elements
      embedded   - list of links to embedded content
      depth      - the number of clicks from the base urls needed to
                   reach this page
      isinternal - whether the link is considered to be internal
      isyanked   - whether the link should be skipped (False or a string
                   with the reason)
      isfetched  - whether the link has been fetched already
      ispage     - whether the link represents a page
      mtime      - modification time (in seconds since the Epoch)
      size       - the size of this document
      mimetype   - the content-type of the document
      encoding   - the character set used in the document
      title      - the title of this document (unicode)
      author     - the author of this document (unicode)
      status     - the result of retrieving the document
      linkproblems - list of problems with retrieving the link
      pageproblems - list of problems in the parsed page
      redirectdepth - the number of this redirect (0 if not a redirect)

    Instances of this class should be made through a site instance
    by adding internal urls and calling crawl().
    """

    def __init__(self, site, url):
        """Creates an instance of the Link class and initializes the
        documented properties to some sensible value."""
        # store a reference to the site
        self.site = site
        # split the url in useful parts and store the parts
        (self.scheme, self.netloc, self.path, self.query) = urlparse.urlsplit(url)[0:4]
        # store the url (without the fragment)
        url=urlparse.urlunsplit((self.scheme, self.netloc, self.path, self.query, ""))
        self.url=url
        # ensure that we are not creating something that already exists
        assert not self.site.linkMap.has_key(url)
        # store the Link object in the linkMap
        self.site.linkMap[url] = self
        # determine the kind of url (internal or external)
        self.isinternal = self.site._is_internal(self)
        # check if the url is yanked
        self.isyanked = self.site._is_yanked(self)
        # initialize some properties
        self.parents = []
        self.children = []
        self.pagechildren = None
        self.embedded = []
        self.depth = None
        self.isfetched = False
        self.ispage = False
        self.mtime = None
        self.size = None
        self.mimetype = None
        self.encoding = None
        self.title = None
        self.author = None
        self.status = None
        self.linkproblems = []
        self.pageproblems = []
        self.redirectdepth = 0

    def _checkurl(self, url):
        """Check to see if the url is formatted properly, correct formatting
        if possible and log an error in the formatting to the current page."""
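        # illustrative example (url assumed): _checkurl('http://example.org/a b')
        # reports the unescaped space as a page problem and returns
        # 'http://example.org/a%20b'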
        if _spacepattern.search(url):
            self.add_pageproblem('link contains unescaped spaces: %s' % url)
            # replace spaces by %20
            url=_spacepattern.sub("%20",url)
        return url

    def add_child(self, child):
        """Add a link object to the child relation of this link.
        The reverse relation is also made."""
        # ignore children for external links
        if not self.isinternal:
            return
        # convert the url to a link object if we were called with a url
        if type(child) is str:
            child = self.site._get_link(self._checkurl(child))
        # add to children
        if child not in self.children:
            self.children.append(child)
        # add self to parents of child
        if self not in child.parents:
            child.parents.append(self)

    def add_embed(self, link):
        """Mark the given link object as used as an image on this page."""
        # ignore embeds for external links
        if not self.isinternal:
            return
        # convert the url to a link object if we were called with a url
        if type(link) is str:
            link = self.site._get_link(self._checkurl(link))
        # add to embedded
        if link not in self.embedded:
            self.embedded.append(link)
        # add self to parents of embed
        if self not in link.parents:
            link.parents.append(self)

    def redirect(self, url):
        """Indicate that this link redirects to the specified url. Maximum
        redirect counting is done as well as loop detection."""
        # figure out determin depth
        redirectdepth = 0
        redirectlist = []
        for p in self.parents:
            if p.redirectdepth > redirectdepth:
                redirectdepth = p.redirectdepth
                redirectlist = p.redirectlist
        self.redirectdepth = redirectdepth + 1
        self.redirectlist = redirectlist
        self.redirectlist.append(self.url)
        # check depth
        if self.redirectdepth >= config.REDIRECT_DEPTH:
            self.add_linkproblem("too many redirects (%d)" % self.redirectdepth)
            return None
        # check for redirect to self
        url = self._checkurl(url)
        if url == self.url:
            self.add_linkproblem("redirect same as source: %s" % url)
            return None
        # check for redirect loop
        if url in self.redirectlist:
            self.add_linkproblem("redirect loop %s" % url)
        # add child
        self.add_child(url)

    def add_linkproblem(self, problem):
        """Indicate that something went wrong while retreiving this link."""
        self.linkproblems.append(problem)

    def add_pageproblem(self, problem):
        """Indicate that something went wrong with parsing the document."""
        # only think about problems on internal pages
        if not self.isinternal:
            return
        self.pageproblems.append(problem)

    def fetch(self):
        """Attempt to fetch the url (if isyanked is not True) and fill in link
        attributes (based on isinternal)."""
        # fully ignore links that should not be fetched
        if self.isyanked:
            debugio.info("  %s" % self.url)
            debugio.info("    "+self.isyanked)
            return
        # see if we can import the proper module for this scheme
        schememodule = schemes.get_schememodule(self.scheme)
        if schememodule is None:
            self.isyanked="unsupported scheme ("+self.scheme+")"
            debugio.info("  %s" % self.url)
            debugio.info("    "+self.isyanked)
            return
        debugio.info("  %s" % self.url)
        content=schememodule.fetch(self, parsers.get_mimetypes())
        self.isfetched = True
        # skip parsing of content if we were returned nothing
        if content is None:
            return
        # find a parser for the content-type
        parsermodule = parsers.get_parsermodule(self.mimetype)
        if parsermodule is None:
            debugio.debug("crawler.Link.fetch(): unsupported content-type: %s" % self.mimetype)
            return
        # parse the content
        parsermodule.parse(content, self)

    def follow_link(self, visited=None):
        """If this link represents a redirect return the redirect target,
        otherwise return self. If the redirect cannot be resolved to a
        link None is returned."""
        if self.redirectdepth == 0:
            return self
        if len(self.children) == 0:
            return None
        # use a fresh list per call (a mutable default argument would be
        # shared between calls)
        if visited is None:
            visited = []
        # check for loops
        visited.append(self)
        if self.children[0] in visited:
            # TODO: report problem if loop is found
            return None
        return self.children[0].follow_link(visited)

    def _pagechildren(self):
        """Determin the page children of this link, combining the children of
        embedded items and following redirects."""
        # if we already have pagechildren defined we're done
        if self.pagechildren is not None:
            return self.pagechildren
        self.pagechildren = []
        # add my own children, following redirects
        for child in self.children:
            # follow redirects
            child=child.follow_link()
            # skip children we already have
            if child is None or child in self.pagechildren:
                continue
            # set depth of child if it is not already set
            if child.depth is None:
                child.depth = self.depth+1
            # add child pages to our pagechildren
            if child.ispage:
                self.pagechildren.append(child)
        # add the children of my embedded elements
        for embed in self.embedded:
            # set depth of embed if it is not already set
            if embed.depth is None:
                embed.depth = self.depth
            # merge in children of embeds
            for child in embed._pagechildren():
                # skip children we already have
                if child in self.pagechildren:
                    continue
                # add it to our list
                self.pagechildren.append(child)
        # return the results
        return self.pagechildren