# -*- coding: utf-8 -*-
# topit.me image downloader (Python)
# 2011-07-29
# v1.0: switched to XPath for page parsing; code is half its former length.
# v1.1: added multi-threading and fixed a bug when reading the image list;
#       dozens of download threads can now run at once (oddly, not banned...)
__author__ = 'Dodola'
from lxml.html import parse
from time import sleep,ctime
import time
import urllib.request
import threading
import os
# Site root; item-page hrefs returned by the album XPath are relative to this.
BASEURL = "http://www.topit.me"
# Album landing page: ALBUMURL + <album id>.
ALBUMURL="http://www.topit.me/album/"
# Paged album view, filled with % (album id, page number).
ALBUMPERURL="http://www.topit.me/album/%s?p=%s"
def Download(path, pageUrl):
    """Fetch the full-size image behind one item page and save it to disk.

    path: destination directory (expected to end with a path separator,
          since the file name is appended by plain string concatenation).
    pageUrl: site-relative URL of the item page (joined with BASEURL).

    Any failure is caught and logged rather than raised, so one broken
    image does not kill the worker thread running this function.
    """
    try:
        # Parse the item page and locate the anchor that links the image.
        doc = parse(BASEURL + pageUrl)
        anchors = doc.xpath('//a[@id="item-tip"]')
        imageUrl = anchors[0].attrib["href"]
        print("正在下载%s" % imageUrl)
        # Use the last path component of the URL as the local file name.
        imageName = imageUrl.rsplit('/', 1)[-1]
        urllib.request.urlretrieve(imageUrl, path + imageName)
        print("保存成功:%s%s" % (path, imageName))
    except Exception as err:
        print("下载错误{}".format(err))
def DownloadAlbum(path, albumId):
    """Download every page of an album into a fresh timestamped subfolder.

    path: base directory (expected to end with a path separator).
    albumId: numeric album id, as a string.
    """
    pageUrl = ALBUMURL + albumId
    # Unique destination folder named after the current local time.
    # NOTE: the trailing backslash makes this Windows-specific; the original
    # literal ("%s%s\") was a syntax error — the backslash ate the quote.
    tempdir = "%s%s\\" % (path, time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())))
    print(tempdir)
    os.mkdir(tempdir)
    print(pageUrl)
    doc = parse(pageUrl)
    # Pagination links; the second-to-last anchor carries the last page number
    # (the last one is presumably a "next" link — TODO confirm against site).
    pagecounts = doc.xpath("id('pagination')/div/a")
    print(pagecounts)
    if len(pagecounts) > 1:
        pagecount = int(pagecounts[-2].text_content())
        # Guarded here so a missing/short pagination bar can't IndexError.
        print("页面总数:%s" % pagecount)
    else:
        # No pagination bar: single-page album; range(1, 2) visits page 1 only.
        pagecount = 2
    # NOTE(review): range(1, pagecount) skips page `pagecount` itself on
    # multi-page albums — kept as in the original; confirm intended.
    for page in range(1, pagecount):
        print("访问第%s页" % page)
        DownloadPerAlbum(tempdir, albumId, page)
def DownloadPerAlbum(path, albumid, page):
    """Download all images on one album page, one thread per image.

    path: destination directory passed through to Download().
    albumid: numeric album id, as a string.
    page: 1-based page number within the album.
    """
    pageUrl = ALBUMPERURL % (albumid, page)
    pageel = parse(pageUrl)
    # Collect the item-page links belonging to this album; set() de-dupes.
    imgUrls = set(pageel.xpath("//a[starts-with(@href,'/album/%s/item/')]/@href" % albumid))
    print("第%s页图片数%s" % (page, len(imgUrls)))
    # One worker thread per image; start them all, then wait for completion
    # so pages are processed one at a time.
    task_threads = [threading.Thread(target=Download, args=(path, url))
                    for url in imgUrls]
    for task in task_threads:
        task.start()
    for task in task_threads:
        task.join()  # wait for every download on this page to finish
    print("线程结束")
if __name__ == "__main__":
    # Original literal "E:\topit.me\" was malformed: the trailing \" escaped
    # the closing quote, leaving the string unterminated. Trailing separator
    # is required because Download() concatenates path + file name directly.
    DownloadAlbum("E:\\topit.me\\", "225489")