Today I learned about web scraping.
# Import the required libraries
import urllib.request
from lxml import etree

# Target URL and request headers that mimic a Chrome browser
url = 'https://www.baidu.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'
}

# Build a Request object with the custom headers
request = urllib.request.Request(url=url, headers=headers)

# Send the request with urllib.request.urlopen() and get the server response
response = urllib.request.urlopen(request)

# Read the returned page and decode it from UTF-8 into a string
webpage_content = response.read().decode('utf-8')

# Parse the HTML with lxml.etree
html_tree = etree.HTML(webpage_content)

# Use XPath to locate a specific element (here: attributes of the input element with id="su")
target_element_value = html_tree.xpath('//input[@id="su"]/@value')[0]
target_element_type = html_tree.xpath('//input[@id="su"]/@type')[0]

# Print the results
print(target_element_value)
print(target_element_type)
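One thing worth keeping in mind (my own note, not from the snippet above): xpath() always returns a list, so indexing with [0] raises an IndexError whenever the expression matches nothing. Below is a minimal sketch of a guarded lookup and a multi-match loop; the XPath expressions are only illustrative, and the page layout of baidu.com may differ from what they assume.

import urllib.request
from lxml import etree

url = 'https://www.baidu.com/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/92.0.4515.159 Safari/537.36'}
request = urllib.request.Request(url=url, headers=headers)
html_tree = etree.HTML(urllib.request.urlopen(request).read().decode('utf-8'))

# Guarded single-value lookup: an empty list means the element was not found
values = html_tree.xpath('//input[@id="su"]/@value')
print(values[0] if values else 'element not found')

# Multi-match lookup: collect the name attribute of every <input> on the page
for name in html_tree.xpath('//input/@name'):
    print(name)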