股票数据的网站抓取(4.2)代码优化

800 查看

代码优化内容

  1. 开多个浏览器实例
  2. 进一步细分函数,让每个函数的功能更单一
  3. 开多线程,加快抓取处理。
    1. 一个流水线用于抓取网页
    2. 一个流水线用于数据保存
    3. 一个流水线用于错误信息保存

程序运行情况


运行情况

命令行输出情况


命令行输出情况

代码


#coding=utf-8
from selenium import webdriver
import time
import os
import re
import sys  
import threading
import Queue
import Tkinter as tk
from selenium.common.exceptions import NoSuchElementException
def myinit():
    # Python 2 only: reload(sys) re-exposes setdefaultencoding (hidden by
    # site.py at startup) so the process-wide default codec can be forced
    # to utf-8, letting the scraped Chinese text be written without
    # explicit .encode() calls.
    # NOTE(review): sys.setdefaultencoding is a well-known hack; prefer
    # explicit encoding at each I/O boundary.
    reload(sys)  
    sys.setdefaultencoding('utf8')

# Query the screen resolution via a temporary Tk root window.
def getscreeninfo():
    """Return the primary screen's dimensions as a dict.

    Keys: width_px/height_px (pixels), width_mm/height_mm (millimetres),
    width_in/height_in (inches), width_dpi/height_dpi.
    Multi-monitor setups are not handled; Tk reports the primary display.
    """
    info={}
    root = tk.Tk()
    try:
        info['width_px'] = root.winfo_screenwidth()
        info['height_px'] = root.winfo_screenheight()
        info['width_mm'] = root.winfo_screenmmwidth()
        info['height_mm'] = root.winfo_screenmmheight()
    finally:
        # Bug fix: the Tk root was never destroyed, so a blank window was
        # leaked for the lifetime of the process.
        root.destroy()
    # 25.4 mm = 1 in
    info['width_in'] = info['width_mm'] / 25.4
    info['height_in'] = info['height_mm'] / 25.4
    info['width_dpi'] = info['width_px']/info['width_in']
    info['height_dpi'] = info['height_px']/info['height_in']
    return info

########################
# Launch several independent Firefox instances, one per worker thread.
def makebr(number):
    """Return a list of `number` freshly started Firefox webdriver instances."""
    return [webdriver.Firefox() for _ in range(number)]

# Resize every browser window in the list.
def setbrsize(brs,x,y):
    """Set each browser window in `brs` to x by y pixels.

    Bug fix: the original ignored the x/y arguments and hard-coded every
    window to 200x200.
    """
    for br in brs:
        br.set_window_size(x,y)
# Arrange the open browser windows on screen.
def layoutbrs(brs,x=0,y=0,dx=10,dy=10,mode=0,hang=0):
    """Position each window in `brs` starting at (x, y), stepping by (dx, dy).

    mode: 0 = horizontal row (dy forced to 0), 1 = vertical column (dx
    forced to 0), 3 = diagonal (both steps applied); 2 (grid) is not
    implemented yet.
    hang: reserved for the grid layout (0 = roughly square); unused for now.

    Bug fix: the original called getscreeninfo() and discarded the result,
    opening (and leaking) a stray Tk window on every layout.
    """
    if(mode==0):
        dy=0
    elif(mode==1):
        dx=0
    if(mode!=2):
        for br in brs:
            br.set_window_position(x,y)
            x=x+dx
            y=y+dy
    else:
        # Grid layout not implemented yet; it would need the screen size
        # from getscreeninfo() to compute rows/columns.
        pass
# Shut down every browser instance.
def closebr(brs):
    """Quit each webdriver in `brs`, closing its window and ending its session."""
    for driver in brs:
        driver.quit()
#######################
# Fetch one stock's name, code and key-figure values from its detail page.
def getinfo(mydriver,gourl):
    """Load `gourl` in `mydriver` and scrape one "~"-separated record.

    Returns (linetext, errorinfo):
      linetext  - "name~code~v1~v2~..." on success, "" when the page has
                  no stock name/code elements (i.e. not a stock page).
      errorinfo - "" normally, or a short message on unexpected errors.
    """
    title='//*[@id="name"]'
    code='//*[@id="code"]'
    # NOTE(review): this xpath is assigned but never used below.
    hexinshujuxpath="/html/body/div[14]/div[1]/div[4]/div[1]"
    # Pattern used to split each table cell into label / value around the
    # colon character; decoded so the pattern is unicode under Python 2.
    restr=":".decode('utf8')
    myre=re.compile(restr,re.I|re.M|re.S)
    linetext=""
    errorinfo=""
    mydriver.get(gourl)
    try:
        # Stock name and code; missing on non-stock pages, raising
        # NoSuchElementException.
        gupiaoming=mydriver.find_element_by_xpath(title).text
        gupiaocode=mydriver.find_element_by_xpath(code).text
        # The 'pad5' table holds the key figures, one label:value per cell.
        hexinshuju=mydriver.find_element_by_class_name('pad5')
        shujuhang=hexinshuju.find_elements_by_tag_name('tr')
        # First column of every row except the last two...
        for i in range(len(shujuhang)-2):
            shujulie=shujuhang[i].find_elements_by_tag_name('td')
            tmpshuju=myre.split(shujulie[0].text)
            linetext=linetext+"~"+tmpshuju[1]
        # ...then the values of rows 8 and 9 directly.
        # assumes the table has at least 10 rows -- TODO confirm on the site
        shuju=myre.split(shujuhang[8].text)
        linetext=linetext+"~"+shuju[1]
        tmpshuju=myre.split(shujuhang[9].text)
        linetext=linetext+"~"+tmpshuju[1]
        linetext="%s~%s%s"%(gupiaoming,gupiaocode,linetext)
        #print "数据:",linetext
    except  NoSuchElementException,e:
        # Not a stock page: return empty linetext silently.
        pass
    except Exception:
        errorinfo= "非预期错误"+gourl
        print errorinfo
    finally:
        # NOTE(review): returning from `finally` suppresses any in-flight
        # exception, so even errors not caught above fall through here.
        return linetext,errorinfo

# Collect every stock detail-page link from the eastmoney listing page.
def geturls(br):
    """Return (shanghai_urls, shenzhen_urls) scraped with browser `br`.

    Each list contains the href of every <a> inside the exchange's <ul>
    on http://quote.eastmoney.com/stocklist.html.
    """
    def hrefs_of(container_xpath):
        # One <ul> per exchange; every <a> inside it links to a stock page.
        container = br.find_element_by_xpath(container_xpath)
        return [a.get_attribute('href')
                for a in container.find_elements_by_tag_name('a')]

    br.get("http://quote.eastmoney.com/stocklist.html")
    sh = hrefs_of("/html/body/div[9]/div[2]/div/ul[1]")
    sz = hrefs_of("/html/body/div[9]/div[2]/div/ul[2]")
    return sh, sz

# Worker-thread loop: pull urls off the job queue until it is empty.
def thread_getinfo(br,jobslink_queue,jieguo_queue,errorinfo_queue):
    """Scrape every url from jobslink_queue using browser `br`.

    Result lines go to jieguo_queue, error messages to errorinfo_queue.
    The thread returns when a non-blocking get finds the job queue empty.
    """
    while True:
        try:
            # Non-blocking get: raises Queue.Empty instead of waiting.
            url=jobslink_queue.get(False)  #False =Don't wait
        except Queue.Empty:
            # Job queue drained -> this worker is done.
            print "完成退出"
            #br.quit()
            return
        #print url
        if(url!=None):
            (linetext,errorinfo)=getinfo(br,url)
            if(linetext!=""):
                jieguo_queue.put(linetext)
            if(errorinfo!=""):
                errorinfo_queue.put(errorinfo)


#######################
#多线程控制函数
def saveinfoabc(info_filename,error_filename,urllist):

    jobslink=Queue.Queue(0)
    jieguo=Queue.Queue(0)
    errorsinfo=Queue.Queue(0)

    for x in urllist[200:250]:#为测试方便这里只取了50,如果要全下载,取消[]就好
        jobslink.put(x)

    #启动线程
    for x in range(THREAD_NUM):
        t=threading.Thread(target=thread_getinfo,args=(brs[x],jobslink,jieguo,errorsinfo))
        t.start()

    f=open(info_filename,'w')
    e=open(error_filename,'w')
    mycount=0

    while (threading.activeCount()>1) or (not jobslink.empty()):
        while jieguo.qsize()>0 or errorsinfo.qsize()>0:
            if(jieguo.qsize()>0):
                jieguotxt=jieguo.get()
                f.write(jieguotxt+"\n")
            if(errorsinfo.qsize()>0):
                error=errorsinfo.get()
                e.write(error+"\n")
        mycount=mycount+1
        if(mycount%100)==0:
            print "%d:  活动线程:%d,剩余连接数:%d,结果剩余条数:%d,错误剩余条数:%d"%(mycount,threading.activeCount(),jobslink.qsize(),jieguo.qsize(),errorsinfo.qsize())
        time.sleep(0.01)
    e.close()
    f.close()

    print "数据下载完成"

######################
# Script entry sequence: set up encoding, harvest the stock links, spin up
# the worker browsers, then download both exchanges.
myinit()

# One throwaway browser just to harvest every stock link from the listing
# page; it is closed again before the worker browsers start.
br=webdriver.Firefox()
print "获得所有链接地址"
(sh,sz)=geturls(br)
br.quit()

#info_filename='shinfo.txt'
#error_filename='sherror.txt'
# Number of parallel scraping threads == number of browser instances.
THREAD_NUM=10
brs=makebr(THREAD_NUM)
setbrsize(brs,200,300)
# Lay the windows out in a horizontal row, 80 px apart, so progress is visible.
layoutbrs(brs ,0,0,80,80,0)

# Shanghai first, then Shenzhen; each call writes one data file and one
# error file.
saveinfoabc('shinfo.txt','sherror.txt',sh)
saveinfoabc('szinfo.txt','szerror.txt',sz)
closebr(brs)