#!/usr/bin/env python

######################################################################
# Copyright 2009, 2010 ryuslash
#
# This file is part of 4grab.
#
# 4grab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 4grab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 4grab.  If not, see <http://www.gnu.org/licenses/>.
######################################################################
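
"""Download images from a 4chan board.

Collect the thread links from the board's index pages, extract the
image links from each thread, and download every image that is not
already present in the configured download directory.
"""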

import urllib
import os
import htmlparser
import progressbar
import config

# Make sure the download directory exists before anything is fetched.
savedir = config.Configuration().get_download_location()
if not os.path.exists(savedir):
    os.makedirs(savedir)

def get_thread_links(baseurl):
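    """Collect the thread links from the board's index pages 0-10."""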
    myparser = htmlparser.MyParser()
    t = [str(n) for n in range(11)]  # board index pages 0 through 10
    i = 1
    total = len(t)
    progress = progressbar.Progress(total)

    for pagenum in t:
        progress.show_progress(i)

        url = baseurl + pagenum
        f = None  # remains None if every attempt below fails
        tries = 10
        while tries > 0:
            try:
                f = urllib.urlopen(url)
                break
            except IOError:
                tries -= 1
                print "\rTry of", url, "failed,", tries, "tries left"
        if f is not None:
            # Read the response
            s = f.read()
            f.close()

            # Process the page.
            myparser.parse(s)
        else:
            print "\rOpening of", url, "did not succeed, trying next one..."
        i += 1
    return myparser.get_hyperlinks()

def get_image_links(baseurl, t=()):
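    """Open each thread link in t and collect the image links it contains."""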
    mysubparser = htmlparser.MySubParser()
    total = len(t)
    progress = progressbar.Progress(total)
    i = 1
    
    for link in t:
        progress.show_progress(i)

        thread_url = baseurl + link
        f = None  # remains None if every attempt below fails
        tries = 10
        while tries > 0:
            try:
                f = urllib.urlopen(thread_url)
                break
            except IOError:
                tries -= 1
                print "\rTry of", thread_url, "failed,", tries, "tries left"
        if f is not None:
            # Read the thread page and feed it to the parser.
            s = f.read()
            f.close()

            mysubparser.parse(s)
        else:
            print "\rOpening of", thread_url, "did not succeed, trying next one..."
        i += 1

    return mysubparser.get_hyperlinks()

def get_images(t=()):
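    """Download every image in t that is not already in the save directory."""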
    total = len(t)
    progress = progressbar.Progress(total)
    i = 1
    for link in t:
        progress.show_progress(i)
        filename = os.path.join(savedir, os.path.split(link)[1])
        if not os.path.exists(filename):
            tries = 10
            while tries > 0:
                try:
                    urllib.urlretrieve(link, filename)
                    break
                except IOError:
                    tries -= 1
                    print "\rDownloading of", link, "failed,", tries, "left"
            if tries == 0 and os.path.exists(filename):
                # A failed urlretrieve can leave a partial file behind;
                # remove it so the next run does not skip this image.
                os.remove(filename)
        else:
            print "\rNot downloading", link, "already downloaded"
        i += 1

if __name__ == "__main__":
    # Build the base URL for the configured board.
    base_url = "http://boards.4chan.org/" + config.Configuration().get_category() + "/"

    # Collect the thread links, then the image links, then download.
    t = get_thread_links(base_url)
    t = get_image_links(base_url, t)
    get_images(t)