Data preparation:
# Import the module
from bs4 import BeautifulSoup
# HTML text to search
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" id='id_xx' xx='zz'>lqz <b>The Dormouse's story <span>彭于晏</span></b> xx</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Parser selection: the built-in 'html.parser' also works, but 'lxml' is faster
# soup=BeautifulSoup(html_doc,'html.parser')
soup = BeautifulSoup(html_doc, 'lxml')  # pip3 install lxml
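A quick check that the parse worked, using the sample document above:
print(soup.title.text)  # The Dormouse's story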
find: returns the first match; find_all: returns all matches
Five kinds of filters: string, regular expression, list, True, method
1 String: the search condition is a plain string
res=soup.find(id='link1')
res=soup.find(class_='title')
res=soup.find(href='http://example.com/elsie')
res=soup.find(name='a',href='http://example.com/elsie',id='link1',class_='sister') # multiple keyword arguments are ANDed together
# equivalently, via attrs:
res=soup.find(attrs={'href':'http://example.com/elsie','id':'link1','class':'sister'})
print(res)
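The simplest string filter is the tag name itself, passed positionally (the same as name='...'); a brief sketch:
res=soup.find('a')       # first <a> tag
res=soup.find_all('b')   # every <b> tag
print(res)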
2 Regular expression: the condition is a compiled pattern
import re
res=soup.find_all(href=re.compile('^http'))
res=soup.find_all(name=re.compile('^b'))
print(res)
3 List: matches if any item in the list matches
res=soup.find_all(name=['body','b','a'])
res=soup.find_all(class_=['sister','boldest'])
print(res)
4 True: the attribute just has to exist, whatever its value
res=soup.find_all(id=True)
res=soup.find_all(name='img',src=True)
print(res)
5 Method: pass a function that takes a tag and returns a boolean
def has_class_but_no_id(tag):
    return tag.has_attr('class') and not tag.has_attr('id')
print(soup.find_all(has_class_but_no_id))
6 Searching the document tree can be combined with navigating it,
then the tag's attributes and text can be read off the result (see the sketch after the example below)
res=soup.html.body.find_all('p')
res=soup.find_all('p')
print(res)
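A minimal sketch of reading attributes and text off a search result, using the sample document above:
p=soup.find('p',class_='title')
print(p.attrs)     # dict of all attributes, e.g. {'class': ['title'], 'id': 'id_xx', 'xx': 'zz'}
print(p['id'])     # single attribute access -> id_xx
print(p.text)      # all nested text, concatenated
print(p.b.text)    # combine with navigation: text inside the nested <b>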
7 Difference between find and find_all
find is simply find_all that keeps only the first match (and returns None if there is none)
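In other words, find('a') behaves like find_all('a', limit=1), except that it returns the tag itself (or None) instead of a list:
print(soup.find('a'))              # first <a> tag, or None when nothing matches
print(soup.find_all('a', limit=1)) # one-element list holding that same tag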
8 The recursive and limit parameters
res=soup.find_all(name='p',limit=2) # cap the number of results
res=soup.html.body.find_all(name='p',recursive=False) # whether to search all descendants; False means direct children only
print(res)
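A sketch showing what recursive=False means with the sample document: the <p> tags are direct children of <body>, not of <html>, so a non-recursive search from <html> finds nothing:
print(soup.html.find_all('p',recursive=False))       # [] -- <p> is not a direct child of <html>
print(soup.html.body.find_all('p',recursive=False))  # the three <p> tags, direct children of <body>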
From: https://www.cnblogs.com/wellplayed/p/18023427