
Crawler in practice: scraping Dytt (电影天堂) with XPath

from lxml import etree
import requests


HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}
BASE_DOMAIN = 'https://www.ygdy8.net/'


def get_detail_urls(url):
    """Fetch one list page and return the absolute URLs of the movie detail pages on it."""
    response = requests.get(url, headers=HEADERS)
    # The site is served in GBK; ignore the occasional byte that cannot be decoded.
    text = response.content.decode(encoding='gbk', errors='ignore')
    html = etree.HTML(text)
    detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
    detail_urls = map(lambda u: BASE_DOMAIN + u, detail_urls)
    return detail_urls


def parse_detail_page(url):
    """Fetch one movie detail page and extract its fields into a dict."""
    movie = {}
    response = requests.get(url, headers=HEADERS)
    text = response.content.decode(encoding='gbk', errors='ignore')
    html = etree.HTML(text)
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    movie['title'] = title
    zoomE = html.xpath("//div[@id='Zoom']")[0]
    # Poster/screenshot image URLs inside the Zoom block (kept as a list).
    cover = zoomE.xpath(".//img/@src")
    movie['cover'] = cover

    def parse_info(info, rule):
        # Strip the "◎xx" label prefix and surrounding whitespace from a field line.
        return info.replace(rule, "").strip()

    # All text nodes inside the Zoom block; the leading "." keeps the query
    # relative to zoomE instead of matching the whole document.
    infos = zoomE.xpath(".//text()")
    for index, info in enumerate(infos):
        if info.startswith("◎片  名"):
            info = parse_info(info, "◎片  名")
            movie['name'] = info
        elif info.startswith("◎产  地"):
            info = parse_info(info, "◎产  地")
            movie['country'] = info
        elif info.startswith("◎类  别"):
            info = parse_info(info, "◎类  别")
            movie['type'] = info
        elif info.startswith("◎豆瓣评分"):
            info = parse_info(info, "◎豆瓣评分")
            movie["douban"] = info
        elif info.startswith("◎主  演"):
            # The first actor sits on the same line as the label; the rest follow
            # one per text node until the next "◎" field begins.
            info = parse_info(info, "◎主  演")
            actors = [info]
            for x in range(index + 1, len(infos)):
                actor = infos[x].strip()
                if actor.startswith("◎"):
                    break
                actors.append(actor)
            movie['actors'] = actors
        elif info.startswith("◎简  介"):
            # The synopsis spans several text nodes and ends at the download section,
            # so collect them instead of overwriting the key on every iteration.
            profiles = []
            for x in range(index + 1, len(infos)):
                profile = infos[x].strip()
                if profile.startswith("【下载地址】"):
                    break
                if profile:
                    profiles.append(profile)
            movie["jianjie"] = "\n".join(profiles)
    # The download link(s) sit in the highlighted table cell near the bottom of the page.
    download_url = html.xpath("//td[@bgcolor='#fdfddf']/a/@href")
    movie["download_url"] = download_url
    return movie


def spider():
    """Crawl the first seven pages of the latest-movies list and write the results to a text file."""
    base_url = "https://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html"
    movies = []
    for x in range(1, 8):
        url = base_url.format(x)
        detail_urls = get_detail_urls(url)
        for detail_url in detail_urls:
            movie = parse_detail_page(detail_url)
            movies.append(movie)
            print(movie)

    # Writing a fixed set of fields (year, director, duration, ...) raised KeyError
    # for movies that lack some of them, so dump whatever keys were actually parsed.
    with open('D:/Users/24913/Desktop/dianying.txt', 'w', encoding='utf-8') as fp:
        num = 0
        for movie in movies:
            num = num + 1
            fp.write(str(num) + '\n')
            for key, value in movie.items():
                fp.write(str(key) + ":" + str(value) + '\n')
    print("文件保存成功")  # file saved successfully


if __name__ == '__main__':
    spider()

This script uses XPath to scrape movie information from the Dytt (电影天堂) site.
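If you want to check the "◎" field parsing without hitting the live site, here is a minimal, self-contained sketch that applies the same relative-XPath-plus-startswith idea to an inline HTML fragment. The fragment, its field values, and the example URL are made up purely for illustration; only the parsing pattern mirrors the script above.

from lxml import etree

# A made-up fragment mimicking the structure of a detail page's Zoom block.
SAMPLE_HTML = """
<div id="Zoom">
  <img src="https://example.com/poster.jpg"/>
  <p>◎片  名 示例电影</p>
  <p>◎产  地 中国</p>
  <p>◎豆瓣评分 8.5/10</p>
</div>
"""

html = etree.HTML(SAMPLE_HTML)
zoom = html.xpath("//div[@id='Zoom']")[0]
# The leading "." keeps the query relative to the Zoom div.
infos = [t.strip() for t in zoom.xpath(".//text()") if t.strip()]

movie = {}
for info in infos:
    if info.startswith("◎片  名"):
        movie['name'] = info.replace("◎片  名", "").strip()
    elif info.startswith("◎产  地"):
        movie['country'] = info.replace("◎产  地", "").strip()
    elif info.startswith("◎豆瓣评分"):
        movie['douban'] = info.replace("◎豆瓣评分", "").strip()

print(movie)  # {'name': '示例电影', 'country': '中国', 'douban': '8.5/10'}

Running this prints the parsed dict shown in the final comment, which is a quick way to confirm that the label-stripping logic behaves as expected before pointing the crawler at the real pages.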