欢迎您访问程序员文章站！本站旨在为大家分享程序员与计算机编程知识！
您现在的位置是: 首页  >  IT编程

python实现多线程采集的2个代码例子

程序员文章站 2024-01-12 16:27:04
代码一: #!/usr/bin/python # -*- coding: utf-8 -*- #encoding=utf-8   import...

代码一:

#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Multi-threaded title scraper: reads (id, url) rows from MySQL, fetches
each page's <title> in worker threads, and collects the results."""

import threading
import queue
import sys
import urllib.request
import re

# The MySQL driver ("mysqlclient", imported as MySQLdb) is third-party;
# guard the import so the fetch/parse helpers stay usable without it.
try:
    import MySQLdb
except ImportError:
    MySQLdb = None  # the __main__ block will fail fast if the DB is needed

#
# Database connection settings
#
db_host = '127.0.0.1'
db_user = "xxxx"
db_passwd = "xxxxxxxx"
db_name = "xxxx"

#
# Worker configuration
#
thread_limit = 3              # number of spider threads to start
jobs = queue.Queue(5)         # bounded queue of pending [id, url] jobs
singlelock = threading.Lock() # serialises shared-output sections
info = queue.Queue()          # unbounded queue of [id, title] results
 
def workerbee(inputlist):
    """Start the spider pool and feed `inputlist` into the job queue.

    Args:
        inputlist: iterable of [id, url] pairs; each is pushed onto the
            module-level bounded `jobs` queue for the spider threads.

    Blocks until every queued job has been marked done via task_done().
    """
    # Start the workers first so the bounded queue drains while we fill it.
    for x in range(thread_limit):
        print('thread {0} started.'.format(x))
        t = spider()
        t.start()
    for i in inputlist:
        try:
            jobs.put(i, block=True, timeout=5)
        except queue.Full:
            # Queue stayed full for 5s: workers are stuck or too slow;
            # this job is dropped (original behavior, just made explicit).
            with singlelock:
                print("the queue is full !")

    # Take the lock so the message does not interleave with worker output.
    with singlelock:
        print("waiting for threads to finish.")
    jobs.join()  # blocks until task_done() was called for every put()
 
def gettitle(url, time=10):
    """Fetch `url` and return the text of its first <title> tag.

    Args:
        url: address to fetch (any scheme urllib.request supports).
        time: socket timeout in seconds (default 10).

    Returns:
        The title text as a str, or '' when the page has no <title>
        (instead of raising IndexError on an empty findall()).

    Raises:
        urllib.error.URLError: on connection failure or timeout.
    """
    import urllib.request  # local import keeps the function self-contained

    with urllib.request.urlopen(url, timeout=time) as response:
        # Decode permissively: the target pages are not guaranteed UTF-8.
        html = response.read().decode('utf-8', 'replace')
    match = re.search(r'<title>(.*?)</title>', html)
    return match.group(1) if match else ''
 
class spider(threading.Thread):
    """Worker thread: pulls [id, url] jobs from `jobs`, resolves each
    page title via gettitle(), and pushes [id, title] onto `info`."""

    def run(self):
        while True:
            try:
                job = jobs.get(True, 1)
            except queue.Empty:
                # No work for a full second: assume the feed is done.
                break
            try:
                title = gettitle(job[1])
                with singlelock:
                    info.put([job[0], title], block=True, timeout=5)
            except Exception:
                # A failed fetch must not kill the worker. The original
                # broke out of the loop here while still HOLDING the lock
                # and WITHOUT calling task_done(), which deadlocked the
                # other workers and hung jobs.join() forever.
                pass
            finally:
                jobs.task_done()  # account for the job even on failure
 
if __name__ == '__main__':
    # Pull up to 10 unprocessed rows (status=0) and scrape their titles.
    con = None
    urls = []
    try:
        con = MySQLdb.connect(db_host, db_user, db_passwd, db_name)
        cur = con.cursor()
        cur.execute('select id,url from `table_name` where `status`=0 limit 10')
        for row in cur.fetchall():
            urls.append([row[0], row[1]])
        workerbee(urls)
        # Drain the collected [id, title] results.
        while not info.empty():
            print(info.get())
    finally:
        # Close the connection even if the scrape or the query failed.
        if con:
            con.close()

代码二:

#!/usr/bin/python
# -*- coding: utf-8 -*-
# filename: robot.py
"""Multi-threaded title scraper for a hard-coded list of Sina game-news
pages: worker threads fetch each URL and print its decoded <title>."""

import threading
import queue
import sys
import urllib.request
import re

#
# Worker configuration
#
thread_limit = 3              # number of spider threads to start
jobs = queue.Queue(5)         # bounded queue of pending URL jobs
singlelock = threading.Lock() # serialises the fetch/print sections

# Target pages (games.sina.com.cn, April 2013).
urls = [
    'http://games.sina.com.cn/w/n/2013-04-28/1634703505.shtml',
    'http://games.sina.com.cn/w/n/2013-04-28/1246703487.shtml',
    'http://games.sina.com.cn/w/n/2013-04-28/1028703471.shtml',
    'http://games.sina.com.cn/w/n/2013-04-27/1015703426.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1554703373.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1512703346.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1453703334.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1451703333.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1445703329.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1434703322.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1433703321.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1433703320.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1429703318.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1429703317.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1409703297.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1406703296.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1402703292.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1353703286.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1348703284.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1327703275.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1239703265.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1238703264.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1231703262.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1229703261.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1228703260.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1223703259.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1218703258.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1202703254.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1159703251.shtml',
    'http://games.sina.com.cn/w/n/2013-04-26/1139703233.shtml',
]
 
def workerbee(inputlist):
    """Start the spider pool and feed the URLs in `inputlist` to it.

    Args:
        inputlist: iterable of URL strings; each is pushed onto the
            module-level bounded `jobs` queue for the spider threads.

    Blocks until every queued job has been marked done via task_done().
    """
    # Start the workers first so the bounded queue drains while we fill it.
    for x in range(thread_limit):
        print('thread {0} started.'.format(x))
        t = spider()
        t.start()
    for i in inputlist:
        try:
            jobs.put(i, block=True, timeout=5)
        except queue.Full:
            # Queue stayed full for 5s: workers are stuck or too slow;
            # this URL is dropped (original behavior, just made explicit).
            with singlelock:
                print("the queue is full !")

    # Take the lock so the message does not interleave with worker output.
    with singlelock:
        print("waiting for threads to finish.")
    jobs.join()  # blocks until task_done() was called for every put()
 
def gettitle(url, time=10):
    """Fetch `url` and return its <title> text decoded from GB2312.

    The target pages (games.sina.com.cn, 2013) are GB2312-encoded, so the
    raw bytes are decoded with errors='replace' to survive bad sequences.

    Args:
        url: address to fetch.
        time: socket timeout in seconds (default 10).

    Returns:
        The title text as a str, or '' when the page has no <title>
        (instead of raising IndexError on an empty findall()).

    Raises:
        urllib.error.URLError: on connection failure or timeout.
    """
    import urllib.request  # local import keeps the function self-contained

    with urllib.request.urlopen(url, timeout=time) as response:
        html = response.read().decode('gb2312', 'replace')
    match = re.search(r'<title>(.*?)</title>', html)
    return match.group(1) if match else ''
 
class spider(threading.Thread):
    """Worker thread: pulls URL jobs from `jobs`, resolves each page
    title via gettitle(), and prints the result under the shared lock."""

    def run(self):
        while True:
            try:
                job = jobs.get(True, 1)
            except queue.Empty:
                # No work for a full second: assume the feed is done.
                break
            try:
                title = gettitle(job)
                with singlelock:
                    print('this {0} is {1}'.format(job, title))
            except Exception:
                # A failed fetch must not kill the worker. The original
                # broke out of the loop here while still HOLDING the lock
                # and WITHOUT calling task_done(), which deadlocked the
                # other workers and hung jobs.join() forever.
                pass
            finally:
                jobs.task_done()  # account for the job even on failure
 
# Script entry point: crawl the hard-coded URL list defined above.
if __name__ == '__main__':
  workerbee(urls)