
Multiple ways to scrape web pages with Python, and how to save the results


Scraping web page data and saving it

bs4 and lxml are the two libraries used below to parse the HTML returned by each request; the examples pair them with different ways of saving the results.
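
The main difference between the two is the query style: with bs4 you search the parsed tree using find / find_all, while with lxml you query it with XPath expressions. A minimal sketch of the two entry points on the same response (a shortened User-Agent header is used here just for brevity; the full examples below use a complete one):

import requests
from bs4 import BeautifulSoup
from lxml import etree

headers = {'user-agent': 'Mozilla/5.0'}
res = requests.get('https://book.douban.com/top250', headers=headers)

# bs4: build a soup object and search it with find / find_all
soup = BeautifulSoup(res.text, 'html.parser')
items = soup.find_all(class_='item')

# lxml: build an element tree and query it with XPath
selector = etree.HTML(res.text)
infos = selector.xpath('//tr[@class="item"]')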

1.bs4+excel(openpyxl):

import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook

wb = Workbook()
sheet = wb.active
sheet.title = '豆瓣读书Top250'
header = ['书名', '评分', '链接']
sheet.append(header)
headers = { 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36' }
res = requests.get('https://book.douban.com/top250', headers=headers)
soup = BeautifulSoup(res.text, 'html.parser')
items = soup.find_all(class_='item')
for i in items:
  tag = i.find(class_='pl2').find('a')
  rating = i.find(class_='rating_nums').text
  name = tag['title']
  link = tag['href']
  row = [name, rating, link]
  sheet.append(row)
wb.save('豆瓣.xlsx')
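
Note that examples 1-4 only fetch the first page of the Top250 list (25 books). A minimal sketch of replacing the single request in example 1 with a loop over all ten pages, assuming the book list paginates with a ?start= parameter just like the movie list in example 5; headers, sheet and wb are the objects created above:

# Hypothetical extension: fetch all 10 pages, 25 books per page
for page in range(10):
    res = requests.get('https://book.douban.com/top250?start=' + str(page * 25), headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    for i in soup.find_all(class_='item'):
        tag = i.find(class_='pl2').find('a')
        sheet.append([tag['title'], i.find(class_='rating_nums').text, tag['href']])
wb.save('豆瓣.xlsx')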

2.bs4+csv

import requests
from bs4 import BeautifulSoup
import csv
with open('豆瓣.csv', 'w', newline='', encoding='utf-8') as file:  # specify UTF-8 so Chinese text is written consistently
  csv_writer = csv.writer(file)
  header = ['书名', '评分', '链接']
  csv_writer.writerow(header)
  headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
  res = requests.get('https://book.douban.com/top250', headers=headers)
  soup = BeautifulSoup(res.text, 'html.parser')
  items = soup.find_all(class_='item')
  for i in items:
    tag = i.find(class_='pl2').find('a')
    rating = i.find(class_='rating_nums').text
    name = tag['title']
    link = tag['href']
    row = [name, rating, link]
    csv_writer.writerow(row)
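
To check what was written, the CSV can be read straight back with the same csv module; a minimal sketch, assuming the file was written with encoding='utf-8' as above:

import csv

# Read the CSV back and print each row (header first, then one row per book)
with open('豆瓣.csv', newline='', encoding='utf-8') as file:
    for row in csv.reader(file):
        print(row)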

3.lxml+csv

# Import the required libraries
from lxml import etree
import requests
import csv  # Python's csv module is used to store the scraped data in a local CSV file


# Create a new CSV file
# Re-running against the same CSV file can raise "Permission denied" (e.g. if the file is still open elsewhere)
with open('douban.csv','w',newline='',encoding='utf-8') as fp:
    # csv.writer() takes a file object
    writer = csv.writer(fp)
    # Write the header row
    writer.writerow(('name', 'url', 'author', 'publisher', 'date', 'price', 'rate', 'comment'))

    # Build the URL
    urls = 'https://book.douban.com/top250'

    # Add the request headers
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
    }
    # Fetch the page with requests and parse the HTML with lxml
    html = requests.get(urls, headers=headers)
    selector = etree.HTML(html.text)
    # Select the outermost element of each entry, then drill down from it
    # <tr class='item'>
    infos = selector.xpath('//tr[@class="item"]')
    for info in infos:
        # An IndexError: list index out of range here means the XPath query matched nothing
        name = info.xpath('td/div/a/@title')[0]
        print(name)
        url = info.xpath('td/div/a/@href')[0]
        # text() returns the text content of the located element
        book_infos = info.xpath('td/p/text()')[0]
        # print(book_infos)
        author = book_infos.split('/')[0]
        publisher = book_infos.split('/')[-3]
        date = book_infos.split('/')[-2]
        price = book_infos.split('/')[-1]
        rate = info.xpath('td[2]/div[2]/span[2]/text()')[0]
        comments = info.xpath('td/p/span/text()')
        comment = comments[0] if len(comments) != 0 else "空"

        # Print to inspect the result
        print(name, url, author, publisher, date, price, rate, comment)
        # Write the row to the CSV file
        writer.writerow((name, url, author, publisher, date, price, rate, comment))

    # The with block closes the file automatically, so no explicit fp.close() is needed
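
The code above notes an occasional IndexError: list index out of range; it is raised whenever an XPath query returns an empty list, for example when the page structure changes or the request is blocked. A minimal defensive sketch, using a hypothetical helper first() (info is the loop variable from the code above):

def first(results, default=''):
    # Return the first XPath match, or a default value instead of raising IndexError
    return results[0] if results else default

name = first(info.xpath('td/div/a/@title'))
url = first(info.xpath('td/div/a/@href'))
comment = first(info.xpath('td/p/span/text()'), default='空')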

4.lxml+excel(openpyxl)

# Import the required libraries
from lxml import etree
import requests
from openpyxl import Workbook  # Excel library

# Build the URL
urls = 'https://book.douban.com/top250'
wb = Workbook()  # create an Excel workbook
sheet = wb.active  # get the active worksheet
sheet.title = '豆瓣读书Top250'
header = ['name', 'url', 'author', 'publisher', 'date', 'price', 'rate', 'comment']
sheet.append(header)
headers = { 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36' }
html = requests.get(url=urls, headers=headers)
selector = etree.HTML(html.text)
# Select the outermost element of each entry, then drill down from it
# <tr class='item'>
infos = selector.xpath('//tr[@class="item"]')
for info in infos:
    # An IndexError: list index out of range here means the XPath query matched nothing
    name = info.xpath('td/div/a/@title')[0]
    url = info.xpath('td/div/a/@href')[0]
    # text() returns the text content of the located element
    book_infos = info.xpath('td/p/text()')[0]
    # print(book_infos)
    author = book_infos.split('/')[0]
    publisher = book_infos.split('/')[-3]
    date = book_infos.split('/')[-2]
    price = book_infos.split('/')[-1]
    rate = info.xpath('td[2]/div[2]/span[2]/text()')[0]
    comments = info.xpath('td/p/span/text()')
    comment = comments[0] if len(comments) != 0 else "空"
    row = [name, url, author, publisher, date, price, rate, comment]
    sheet.append(row)
wb.save('豆瓣.xlsx')
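
openpyxl can also read the workbook back, which is a quick way to check that the rows were saved; a minimal sketch:

from openpyxl import load_workbook

wb = load_workbook('豆瓣.xlsx')
sheet = wb['豆瓣读书Top250']
# Skip the header row and print name and rating for each saved book
for row in sheet.iter_rows(min_row=2, values_only=True):
    name, url, author, publisher, date, price, rate, comment = row
    print(name, rate)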

5.bs4+excel(xlwt)

import re  # regular expressions, for text matching
# import bs4  # only BeautifulSoup is needed from bs4, so it can be imported directly:
from bs4 import BeautifulSoup  # HTML parsing, for extracting the data
import xlwt  # Excel (.xls) operations
import sqlite3  # SQLite database operations (imported but not actually used below)
import urllib.request, urllib.error  # build the URL request and fetch the page data


def main():
    # The page to scrape
    baseurl = "https://movie.douban.com/top250?start="
    # Path to save to
    savepath = ".\\豆瓣电影Top250.xls"  # escape backslashes with \\ or prefix the whole string with r, e.g. r".\豆瓣电影Top250.xls"
    savepath2Db = "movies.db"
    # 1. Scrape the pages
    # print(askURL(baseurl))
    datalist = getData(baseurl)
    print(datalist)
    # 3. Save the data (to an Excel file)
    saveData(datalist, savepath)


# Pattern for the movie detail-page link
findLink = re.compile('<a href="(.*?)">')  # create a compiled regex object
# Pattern for the poster image link
findImgSrc = re.compile('<img alt=".*src="(.*?)"', re.S)  # re.S lets . match across newlines
# Movie title
findTitle = re.compile('<span class="title">(.*)</span>')
# Movie rating
findRating = re.compile('<span class="rating_num" property="v:average">(.*)</span>')
# Number of ratings
# findJudge = re.compile(r'<span>(\d*)(.*)人评价</span>')
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line summary
findInq = re.compile('<span class="inq">(.*)</span>')
# Related info about the movie
findBd = re.compile('<p class="">(.*?)</p>', re.S)  # the block contains <br/>, so re.S is needed to match across lines


# Scrape the pages
def getData(baseurl):
    datalist = []
    for i in range(0, 10):  # 10 pages, 25 movies per page
        url = baseurl + str(i*25)
        html = askURL(url)  # the page source returned for this URL
        # print(html)
        # 2. Parse the data, one item at a time
        soup = BeautifulSoup(html, "html.parser")  # parse the HTML document into a tree structure with html.parser
        for item in soup.find_all("div", class_="item"):  # find all matching tags, returned as a list
            # print(item)
            data = []  # holds the info for one movie
            item = str(item)
            # Detail-page link
            link = re.findall(findLink, item)[0]
            data.append(link)
            # Poster image
            img = re.findall(findImgSrc, item)[0]
            data.append(img)
            # Title
            titles = re.findall(findTitle, item)
            if(len(titles) == 2):
                ctitle = titles[0]  # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")
                data.append(otitle)  # original (foreign) title
            else:
                data.append(titles[0])
                data.append(' ')  # leave the foreign title blank
            # data.append(title)
            # Rating
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            # Number of ratings
            judgeNum = re.findall(findJudge, item)[0]
            # print(judgeNum)
            data.append(judgeNum)
            # One-line summary
            inq = re.findall(findInq, item)
            if len(inq) == 0:
                data.append(" ")
            else:
                data.append(inq[0].replace("。", ""))
            # Related info about the movie
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # remove <br/> tags
            bd = re.sub('/', " ", bd)  # replace the / separators
            data.append(bd.strip())  # strip leading and trailing whitespace

            datalist.append(data)  # save the processed record for this movie
        # for it in datalist:
        #     print(it)
    return datalist


# Fetch the page content for the given URL
def askURL(url):
    # Request headers; the User-Agent disguises the request as a regular browser visit
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/87.0.4280.88 Safari/537.36"}
    req = urllib.request.Request(url, headers=head)
    html = ""  # 获取到的网页源码
    try:
        response = urllib.request.urlopen(req)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):  # has attribute
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # style_compression: style compression setting
    sheet = book.add_sheet("豆瓣电影top250", cell_overwrite_ok=True)  # cell contents may be overwritten
    col = ("电影详情链接", "图片链接", "影片中文名", "影片外文名", "评分", "评价数", "概述", "相关信息")  # header row as a tuple
    for i in range(8):  # write the header (column names)
        sheet.write(0, i, col[i])

    for i in range(1, len(datalist)+1):
        for j in range(len(datalist[i-1])):
            sheet.write(i, j, datalist[i-1][j])
    book.save(savepath)




if __name__ == '__main__':
    main()
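
The script imports sqlite3 and defines savepath2Db = "movies.db", but never actually writes to the database. A minimal sketch of what a saveData2Db function could look like; the table name and column names are assumptions, not part of the original:

def saveData2Db(datalist, dbpath):
    # Store the same eight fields per movie in a SQLite table (schema is assumed)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    cur.execute('''
        CREATE TABLE IF NOT EXISTS movies (
            link TEXT, img TEXT, cname TEXT, oname TEXT,
            rating TEXT, judge_num TEXT, inq TEXT, bd TEXT
        )
    ''')
    cur.executemany('INSERT INTO movies VALUES (?, ?, ?, ?, ?, ?, ?, ?)', datalist)
    conn.commit()
    conn.close()

# Usage (e.g. from main()): saveData2Db(datalist, savepath2Db)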

From: https://www.cnblogs.com/MaybeGut/p/16912072.html
