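"""Download images from the 4chan /w/ imageboard.

Crawls the board's index pages for thread links, fetches each thread
page and scans it for direct image links, then saves any image not
already present in `savedir`. Python 2 script (uses the legacy urllib
and sgmllib APIs; sgmllib was removed in Python 3).
"""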
import urllib
import sgmllib
import re
import os

savedir = "/home/slash/Pictures/4grab/"
 
class MyParser(sgmllib.SGMLParser):
    "A simple parser class."
    
    def parse(self, s):
        "Parse the given string 's'."
        self.feed(s)
        self.close()
 
    def __init__(self, verbose=0):
        "Initialise an object, passing 'verbose' to the superclass."
        
        sgmllib.SGMLParser.__init__(self, verbose)
        self.hyperlinks = []
 
        self.url_reg = re.compile(r'res/\d+\Z')
        self.img_reg = re.compile(r'/\d+\.(jpg|gif|bmp|png|jpeg)\Z')
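        # url_reg matches thread links ending in e.g. "res/1234567".
        # img_reg would match bare image names such as "/1234567890.jpg",
        # but it is never consulted: image links are found via the
        # overridden url_reg in MySubParser below.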
 
    def start_a(self, attributes):
        "Process a hyperlink and its 'attributes'."
 
        for name, value in attributes:
            if name == "href":
                if self.url_reg.search(value) is not None:
                    self.hyperlinks.append(value)
 
    def get_hyperlinks(self):
        "Return the list of hyperlinks."
 
        return self.hyperlinks
 
class MySubParser(MyParser):
    def __init__(self, verbose=0):
        MyParser.__init__(self, verbose)
        self.url_reg = re.compile(r'/src/\d+\.\w{3,4}\Z')
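        # Matches direct image links ending in e.g. "/src/1234567890.jpg".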

if __name__ == "__main__":
    # Crawl the first `total` index pages of the 4chan /w/ imageboard.
    base_url = "http://boards.4chan.org/w/"
    myparser = MyParser()
    total = 10
    for i in range(0, total):
        if i > 0:
            url = base_url + str(i)
        else:
            url = base_url
        
        f = None
        tries = 10
        while tries > 0:
            try:
                f = urllib.urlopen(url)
                break
            except IOError:
                tries = tries - 1
                print "Try of", url, "failed,", tries, "tries left"
        if f is not None:
            # Read the fetched page.
            s = f.read()
            f.close()
            
            # Parse the page, accumulating matching thread links.
            myparser.parse(s)
            print "Parsed", url, "-", i + 1, "of", total
        else:
            "Opening of", url, "did not succeed, trying next one..."
 
    # Visit each collected thread link and scan it for direct image links.
    t = myparser.get_hyperlinks()
    mysubparser = MySubParser()
    total = len(t)
    i = 1
    for link in t:
        img_url = base_url + link
        f = None
        tries = 10
        while tries > 0:
            try:
                f = urllib.urlopen(img_url)
                break
            except IOError:
                tries = tries - 1
                print "Try of", img_url, "failed,", tries, "tries left"
        if f is not None:
            s = f.read()
            f.close()

            mysubparser.parse(s)
            print "Parsed", img_url, "-", i, "of", total
        else:
            print "Opening of", img_url, "did not succeed, trying next one..."
        i = i + 1

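    # Download each discovered image that is not already in savedir.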
    t = mysubparser.get_hyperlinks()
    total = len(t)
    i = 1
    for link in t:
        filename = os.path.join(savedir, os.path.split(link)[1])
        if not os.path.exists(filename):
            tries = 10
            while tries > 0:
                try:
                    urllib.urlretrieve(link, filename)
                    print "Retrieved", link, "-", i, "of", total
                    break
                except IOError:
                    tries = tries - 1
                    print "Downloading of", link, "failed,", tries, "left"
                    
        else:
            print "Not downloading", link, "already downloaded"
        i = i + 1