
Scraping 100 Pages of cnblogs News in One Second with Go


Go's goroutines make concurrent scraping extremely fast: all 100 pages of news headlines on cnblogs (博客园) can be fetched in about one second. The program below fires one goroutine per page and uses the third-party goquery package (github.com/PuerkitoBio/goquery) to parse the HTML.

package main

import (
    "bytes"
    "fmt"
    "log"
    "net/http"
    "runtime"
    "strconv"
    "sync"

    "github.com/PuerkitoBio/goquery"
)

func scraper(page string) string {
    // Request the HTML page.
    scrapeURL := "https://news.cnblogs.com/n/page/" + page
    client := &http.Client{}
    req, _ := http.NewRequest("GET", scrapeURL, nil)
    req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
    req.Header.Set("Accept-Charset", "GBK,utf-8;q=0.7,*;q=0.3")
    //req.Header.Set("Accept-Encoding", "gzip,deflate,sdch")
    req.Header.Set("Accept-Language", "zh-CN,zh;q=0.8")
    req.Header.Set("Cache-Control", "max-age=0")
    req.Header.Set("Connection", "keep-alive")
    req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36")
    res, err := client.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer res.Body.Close()
    if res.StatusCode != 200 {
        log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
    }

    // Load the HTML document.
    doc, err := goquery.NewDocumentFromReader(res.Body)
    if err != nil {
        log.Fatal(err)
    }

    // Find the news items; collect each title and its link.
    var buffer bytes.Buffer
    buffer.WriteString("**********Scraped page " + page + "**********\n")
    doc.Find(".content .news_entry").Each(func(i int, s *goquery.Selection) {
        title := s.Find("a").Text()
        url, _ := s.Find("a").Attr("href")
        buffer.WriteString("News " + strconv.Itoa(i) + ": " + title + "\nhttps://news.cnblogs.com" + url + "\n")
    })
    return buffer.String()
}

func main() {
    runtime.GOMAXPROCS(runtime.NumCPU()) // explicit but redundant: this has been the default since Go 1.5
    ch := make(chan string, 100)         // buffered so all 100 goroutines can send without blocking
    wg := &sync.WaitGroup{}
    for i := 1; i <= 100; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            // page must be local to each goroutine; sharing a single
            // variable across goroutines would be a data race.
            page := strconv.Itoa(i)
            fmt.Printf("Scraping page %s...\n", page)
            ch <- scraper(page)
        }(i)
    }
    wg.Wait()

    // Print the results: exactly 100 messages are buffered in ch,
    // so the loop must read exactly 100 times or it will deadlock.
    for i := 0; i < 100; i++ {
        fmt.Println(<-ch)
    }
}
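One caveat: the channel delivers results in completion order, so pages print in whatever order the goroutines happen to finish. Below is a minimal sketch (not part of the original article) of an alternative main that assumes the scraper function above is in the same package; it keeps output in page order by giving each goroutine its own slice slot, and times the run to check the one-second claim.

package main

import (
    "fmt"
    "strconv"
    "sync"
    "time"
)

// Sketch only: assumes scraper(page string) string from the listing above.
// Each goroutine writes to its own slice element, so there is no data race
// and results stay in page order without a channel.
func main() {
    start := time.Now()
    results := make([]string, 100)
    var wg sync.WaitGroup
    for i := 1; i <= 100; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            results[i-1] = scraper(strconv.Itoa(i)) // distinct slot per goroutine
        }(i)
    }
    wg.Wait()
    for _, r := range results {
        fmt.Println(r)
    }
    fmt.Printf("Scraped 100 pages in %v\n", time.Since(start))
}

Writing to distinct slice elements from different goroutines is safe without a mutex, and wg.Wait() provides the happens-before edge needed before the slice is read.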

Summary

That's all for this article. I hope its content offers some useful reference for your study or work, and thank you for your support.