Python Practice: Parallel Programming (2) <2016.4.24>

Let's take another look at yesterday's crawler.
I'm pasting the code again. A small bug turned up today and at first I couldn't find where the problem was. (I've just located it; the corrected code is at the bottom.)

#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke

import urllib
import os
import re
from threading import Thread
import time

def downloadURL(urls,dirpath):
    for url in urls:
        if len(url)>0:
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26],'w').write(content)# bug: a colon is missing after -26 here, so the same file gets written over and over.

def parseTarget(url):
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])
    return urls

def thread_job(n,Thread,url_list,job):
    local_time = time.time()
    threads = [Thread(target=job,args=(url_list[i],str(n)+Thread.__name__)) for i in xrange(n)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print n,Thread.__name__,"run job need",time.time()-local_time

if __name__=="__main__":
    t=time.time()
    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [4]:
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i in xrange(n):# this check is redundant, I wrote it twice by mistake!
                if i==n-1:
                    url_list.append(urls[i*url_split_len:url_len])
                else:
                    url_list.append(urls[i*url_split_len:(i+1)*url_split_len])
        thread_job(n,Thread,url_list,downloadURL)
    print "All done in",time.time()-t

When I ran this, the result was completely wrong: the program wasn't writing anything into the folder.
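Here is a minimal interactive sketch of the slicing mistake annotated above, using a made-up URL with the same shape as the blog's article links:

>>> url = 'http://blog.sina.com.cn/s/blog_4701280b0102eo83.html'
>>> url[-26]       # a bare index yields a single character
'b'
>>> url[-26:]      # the slice yields the last 26 characters, a usable filename
'blog_4701280b0102eo83.html'

Without the colon, every download ends up under the same one-character name, which would explain why the expected .html files never appeared.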
The code below was written by the teacher. Since he set up both threads and processes to compare timings, I tried to strip out the process part, but both of my rewrites failed, and even after copying his version verbatim the problem remained. Could anyone take a look at where the problem is? Much appreciated!

#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke

import urllib
import os
import re
from threading import Thread
from multiprocessing import Process
import time

def downloadURL(urls,dirpath):
    """
    urls: 需要下载的url列表
    dirpath: 下载的本地路径
    """
    for url in urls:
        if len(url)>0:
            #print "current process id is ",os.getpid()
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26:],'w').write(content)

def parseTarget(url):
    """
    根据目标url获取文章列表的urls
    """
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])

    return urls   

def thread_process_job(n, Thread_or_Process, url_list, job):
    """
    n: 多线程或多进程数
    Thread_Process: Thread/Process类 
    job: countdown任务
    """
    local_time=time.time()
    threads_or_processes = [Thread_or_Process(target=job,args=(url_list[i],str(n)+Thread_or_Process.__name__)) for i in xrange(n)]
    for t in threads_or_processes:
        t.start()
    for t in threads_or_processes:
        t.join()

    print n,Thread_or_Process.__name__," run job need ",time.time()-local_time

if __name__=="__main__":

    t=time.time()

    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [8,4,2,1]:
        #split urls into url_list
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i==n-1:
                print "*************",len(url_list)
                url_list.append(urls[i*url_split_len:url_len])
                print len(url_list),'%%%%%%%'
            else:
                url_list.append(urls[i*url_split_len:(i+1)*url_split_len])
        #after splitting the work, create the threads or processes
        thread_process_job(n,Thread, url_list, downloadURL)
        thread_process_job(n,Process, url_list, downloadURL)

    print "All done in ",time.time()-t

I've just got the code working. The mistakes are marked with comments in the broken version above.

#!/usr/bin/env python
# coding: utf-8


import urllib
import os
import re
from threading import Thread
import time

def downloadURL(urls,dirpath):
    for url in urls:
        if len(url)>0:
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26:],'w').write(content)# fixed: with the colon this is a slice, giving a proper filename

def parseTarget(url):
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])
    return urls

def thread_job(n,Thread,url_list,job):
    local_time = time.time()
    threads = [Thread(target=job,args=(url_list[i],str(n)+Thread.__name__)) for i in xrange(n)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print n,Thread.__name__,"run job need",time.time()-local_time

if __name__=="__main__":
    t=time.time()
    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [4]:
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):# the redundant "if i in xrange(n)" check is gone
            if i==n-1:
                url_list.append(urls[i*url_split_len:url_len])
            else:
                url_list.append(urls[i*url_split_len:(i+1)*url_split_len])

        thread_job(n,Thread,url_list,downloadURL)

    print "All done in",time.time()-t

This version runs correctly.
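One detail worth spelling out: parseTarget indexes each match with href[1] because re.findall returns a list of tuples whenever the pattern has more than one capture group. A minimal sketch with a made-up anchor tag:

import re

html = '<a title="some post" href="http://example.com/post1.html">'
pattern = r'<a title=(.*?) href="(.*?)">'
print re.findall(pattern, html)
# [('"some post"', 'http://example.com/post1.html')]
# each match is a (title, href) tuple, so href[1] is the link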