urllib
1.1 Basic usage of urllib
# Use urllib to fetch the source of the Baidu homepage
import urllib.request
# 1. Define a url: the address you want to visit
url = 'http://www.baidu.com'
# 2. Simulate a browser sending a request to the server
response = urllib.request.urlopen(url)
# 3. Get the page source from the response
content = response.read()
# 4. Print the data
print(content)
# Use urllib to fetch the source of the Baidu homepage
import urllib.request
# 1. Define a url: the address you want to visit
url = 'http://www.baidu.com'
# 2. Simulate a browser sending a request to the server
response = urllib.request.urlopen(url)
# 3. Get the page source from the response
# content = response.read()
# read() returns binary data as bytes (the printed output carries a b prefix)
# We need to convert those bytes into a string
# bytes -> string == decoding: decode('<encoding>')
# The encoding is usually the charset value declared in the page's HTML
content = response.read().decode('utf-8')
# 4. Print the data
print(content)
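Rather than hardcoding 'utf-8', the charset can often be read straight from the response headers; a minimal sketch (falling back to utf-8 when the server declares nothing):
import urllib.request

response = urllib.request.urlopen('http://www.baidu.com')
charset = response.headers.get_content_charset() or 'utf-8'
content = response.read().decode(charset)
print(charset)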
1.2 One type and six methods
import urllib.request
url = 'http://www.baidu.com'
response = urllib.request.urlopen(url)
# One type, six methods
print(type(response))
# response is of type HTTPResponse
# 1. Read byte by byte
# content = response.read()
# The number in the parentheses is how many bytes to read
content1 = response.read(10)
print(content1)
# 2. Read one line
content2 = response.readline()
print(content2)
# 3. Read all remaining lines
content3 = response.readlines()
print(content3)
# 4. Return the status code; 200 means the request succeeded
print(response.getcode())
# 5. Return the URL
print(response.geturl())
# 6. Get the response headers
print(response.getheaders())
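For reference, newer code usually reads the same information through attributes; both spellings work on HTTPResponse:
import urllib.request

response = urllib.request.urlopen('http://www.baidu.com')
print(response.status)   # same value as response.getcode()
print(response.url)      # same value as response.geturl()
print(response.headers)  # the header object behind getheaders()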
1.3 Downloading
import urllib.request
# Download a web page
url_page = 'http://www.baidu.com'
urllib.request.urlretrieve(url_page,'baidu.html')
# Download an image
url_img = 'https://tse4-mm.cn.bing.net/th/id/OIP-C.-A719q8dZGMnEBxUVJoL5AHaJQ?w=135&h=180&c=7&r=0&o=5&dpr=1.5&pid=1.7'
urllib.request.urlretrieve(url_img,'lisa.jpg')
# Download a video (fill in a real video URL and a local filename)
url_video = '<video URL>'
urllib.request.urlretrieve(url_video,'<filename>')
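urlretrieve also accepts a progress callback as its third argument, which is useful for large videos; a small sketch (the percentage math assumes the server reports a Content-Length):
import urllib.request

def report(block_num, block_size, total_size):
    # called repeatedly while the download is in progress
    if total_size > 0:
        done = min(block_num * block_size / total_size * 100, 100)
        print(f'{done:.1f}%')

urllib.request.urlretrieve('http://www.baidu.com', 'baidu.html', report)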
1.4 Customizing the request object
import urllib.request
url = 'https://www.baidu.com'
# Example: https://cn.bing.com/search?q=周杰伦
# Anatomy of a URL:
# http/https   cn.bing.com   80/443   search   q=周杰伦   #
# protocol     host          port     path     params     anchor
# https means the site uses SSL encryption
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
# The UA (User-Agent) is a special header string that lets the server identify the client's OS and version, CPU type, browser and version, and so on
# To find your UA: right-click an empty spot on a page -> Inspect -> Network -> refresh the page -> click the first row (the document request) -> the last line of the request headers is User-Agent
# urlopen cannot take a dict, so headers cannot be passed to it directly
# Customize the request object instead
# Note: because of the parameter order (there is a data parameter between url and headers), url and headers must be passed as keyword arguments
request = urllib.request.Request(url = url,headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
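The URL anatomy above can be checked programmatically with urllib.parse.urlparse, which splits a URL into exactly these pieces:
from urllib.parse import urlparse

parts = urlparse('https://cn.bing.com/search?q=周杰伦#anchor')
print(parts.scheme)    # https (protocol)
print(parts.netloc)    # cn.bing.com (host)
print(parts.path)      # /search
print(parts.query)     # q=周杰伦 (params)
print(parts.fragment)  # anchor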
1.5 Encoding and decoding
1.5.1 GET request with quote
# Goal: fetch the page source of https://cn.bing.com/search?q=周杰伦
import urllib.request
import urllib.parse
url = 'https://cn.bing.com/search?q='
# Customizing the request object is the first line of defense against anti-scraping checks
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
# The characters 周杰伦 must be percent-encoded before they can go into the url
# urllib.parse handles this
name = urllib.parse.quote('周杰伦')
url = url+name
# Customize the request object
request = urllib.request.Request(url = url,headers = headers)
# Simulate a browser sending the request to the server (note: pass the request object, not the bare url)
response = urllib.request.urlopen(request)
# Get the response content
content = response.read().decode('utf-8')
print(content)
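quote is reversible with unquote, which is handy when reading percent-encoded URLs out of the Network panel:
from urllib.parse import quote, unquote

encoded = quote('周杰伦')
print(encoded)           # %E5%91%A8%E6%9D%B0%E4%BC%A6
print(unquote(encoded))  # 周杰伦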
1.5.2 GET request with urlencode
# urlencode is for multiple parameters joined with &
# e.g. https://cn.bing.com/search?q=周杰伦&sex=男
import urllib.request
import urllib.parse
data = {
    'q':'周杰伦',
    'sex':'男',
}
a = urllib.parse.urlencode(data)
print(a)
# Output: q=%E5%91%A8%E6%9D%B0%E4%BC%A6&sex=%E7%94%B7
# Fetch the page source of https://cn.bing.com/search?q=%E5%91%A8%E6%9D%B0%E4%BC%A6&sex=%E7%94%B7
import urllib.request
import urllib.parse
base_url = 'https://cn.bing.com/search?'
data = {
    'q':'周杰伦',
    'sex':'男',
}
new_data = urllib.parse.urlencode(data)
# The requested resource path
url = base_url + new_data
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
request = urllib.request.Request(url = url,headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
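The inverse of urlencode is urllib.parse.parse_qs; note that it returns each value as a list, since a query string may repeat a key:
from urllib.parse import urlencode, parse_qs

qs = urlencode({'q': '周杰伦', 'sex': '男'})
print(parse_qs(qs))  # {'q': ['周杰伦'], 'sex': ['男']}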
1.5.3 POST requests
Baidu Translate serves as the example here.
As we type s p i d e r one letter at a time, the page fires many requests; we need the one sent after each keystroke. Here it is called sug, and it carries the url, the UA, and the other information we need.
import urllib.request
import urllib.parse
url = 'https://fanyi.baidu.com/sug'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
data = {
    'kw' : 'spider'
}
# POST parameters must be encoded
data = urllib.parse.urlencode(data).encode('utf-8')
# POST parameters are not appended to the url; they go into the request object instead
request = urllib.request.Request(url = url,data = data,headers = headers)
# Simulate a browser sending the request to the server
response = urllib.request.urlopen(request)
# Get the response data
content = response.read().decode('utf-8')
print(content)
# content is a string
# string -> json object
import json
obj = json.loads(content)
print(obj)
# POST parameters must be encoded: data = urllib.parse.urlencode(data)
# After url-encoding, encode() must also be called: data = urllib.parse.urlencode(data).encode('utf-8')
# The parameters go into the request object: request = urllib.request.Request(url = url,data = data,headers = headers)
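To make the GET/POST contrast above concrete, here is a minimal side-by-side sketch (example.com stands in for a real endpoint):
import urllib.parse

params = urllib.parse.urlencode({'kw': 'spider'})

# GET: the encoded parameters are appended to the url; no data argument
get_url = 'https://example.com/search?' + params

# POST: the encoded parameters are additionally turned into bytes and passed as data
post_data = params.encode('utf-8')
# request = urllib.request.Request(url, data = post_data, headers = headers)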
1.5.4 POST request: Baidu Translate's detailed translation
In the developer tools, find the form data; that is what goes into data.
Then find the request headers; everything from Accept down to X-... is what we need for headers.
Using EditPlus
Since data and headers are both dictionaries and there is far too much text to convert by hand, EditPlus is recommended; for download and localization details see EditPlus下载安装和汉化_editplus汉化包-CSDN博客.
Paste the copied content in, press Ctrl+H, set up the find-and-replace pattern, and click Replace All to turn the raw header lines into dictionary entries.
import urllib.request
import urllib.parse
import json
url = 'https://fanyi.baidu.com/v2transapi?from=en&to=zh'
headers ={
# 'Accept': '*/*',
# # 'Accept-Encoding': 'gzip, deflate, br',
# # This line must stay commented out; with gzip enabled the response comes back compressed and decode('utf-8') fails
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
# 'Acs-Token': '1709985606131_1710051090138_6eV/ODO/3uTIQ4xMZONKhpt4UQEK7FUwa6f7PP5BN3PTFdLLNg89TOCh7I80d18en8PSVLX37yYqNvDupbLHI5P1kh4zNDC0DCQIHQVzpQVyBcKsiZ5gzmdtfRdMjBsx7OHcjwTQ+Q0tXqRpKycPrG7Jw5S802GJtwGUXEZ1TeEx7YzCHRzkRNUJr7B66s57GAYvSGSfHyhzds8UMMM/rxpiauYmNxy5OEa6ZEUMGKa5B/2urjJlPI/1EppAVWzFEtpaWyr683DluA/JboMCNwS8IAs4FHKA1mr7jf8/LX0m/AyoHCHnN+72ybwp/pOiIawk+thwS3UNf6DbosRI2O/GDXVMUwaUz2OyPpHmVERz2HNlapWZSjzthNBmZt35jAwjgHvzC9USKDSolVrWg28iw04A3MPPiWEw2Kenl/43aZ+RBCy7tuMnO8uyPWKm',
# 'Connection': 'keep-alive',
# 'Content-Length': '152',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'BAIDUID_BFESS=21142DA9C902AF45599C14E458764454:FG=1; BDUSS=RZQ0c0NFFsUEhYcHp0cmFseFMwakR0V2twb29uTURVR3ZofmI1VTl6YW5DdGRsSVFBQUFBJCQAAAAAAQAAAAEAAABV1RsPx7DNvrXEw9TNvgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKd9r2Wnfa9lZU; BDUSS_BFESS=RZQ0c0NFFsUEhYcHp0cmFseFMwakR0V2twb29uTURVR3ZofmI1VTl6YW5DdGRsSVFBQUFBJCQAAAAAAQAAAAEAAABV1RsPx7DNvrXEw9TNvgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKd9r2Wnfa9lZU; ZFY=ilsG5NTXHpfgILIaA988z4S51DtFtE4q:Ab3zQ8c8mK4:C; APPGUIDE_10_7_0=1; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1709976202,1710051080; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1710051080; ab_sr=1.0.1_ODU1MGM0YjI5YzQ4YmRiYTMyNjQxNjI1NWNmZmE4OTZhM2VhZTQzMDY5MWJjNTQ0M2IyNDVjYzBiNGU3YzY3MjUwMTIwYjE2MDZhNWM3NDBhOWViOWVmY2YyYTUyYzc3OGY5YTA1OGY3MGU3ZGIwNDgxOTc4YTQyNGU0YjllZDgzNGNiZTdkODZmNzIwMTlhMTE3ZmFlZWZkMDFjY2RiMTc1NmNhNDljZmYxZWI3NzRiMmY3NGVhYzVhODgyOTI0',
# 'Host': 'fanyi.baidu.com',
# 'Origin': 'https://fanyi.baidu.com',
# 'Referer': 'https://fanyi.baidu.com/',
# 'Sec-Fetch-Dest': 'empty',
# 'Sec-Fetch-Mode': 'cors',
# 'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
# 'X-Requested-With': 'XMLHttpRequest',
# In practice only Cookie and User-Agent matter here; everything else can stay commented out
}
data = {
'from': 'en',
'to': 'zh',
'query': 'love',
'transtype': 'realtime',
'simple_means_flag': '3',
'sign': '198772.518981',
'token': '14321551a8edf96a248e6f0299fcc54e',
'domain': 'common',
}
# POST parameters must be encoded
data = urllib.parse.urlencode(data).encode('utf-8')
# Customize the request object
request = urllib.request.Request(url = url,data = data,headers = headers)
# Simulate a browser sending the request to the server
response = urllib.request.urlopen(request)
# Get the response data
content = response.read().decode('utf-8')
obj = json.loads(content)
print(obj)
1.6 Ajax GET requests
(Using the Douban movie chart data as the example)
First, find the request that actually fetches the chart data.
Its headers panel gives us the url and the UA.
import urllib.request
import urllib.parse
import json
# GET request
# Goal: fetch the first page of Douban movie data and save it locally
url = 'https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&start=0&limit=20'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
request = urllib.request.Request(url = url,headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
# Save the data locally
# open() defaults to the system encoding (gbk on Chinese Windows); to save Chinese text, pass encoding = 'utf-8' to open()
# fp = open('douban.json','w',encoding = 'utf-8')
# fp.write(content)
with open('douban.json','w',encoding = 'utf-8') as fp:
    fp.write(content)
# The two approaches are equivalent
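A quick sanity check on the saved file; this assumes the endpoint returns a JSON array whose items carry a 'title' field (true at the time of writing, but the Douban API may change):
import json

with open('douban.json', 'r', encoding = 'utf-8') as fp:
    movies = json.load(fp)
print(len(movies))                           # should be 20 with limit=20
print([m.get('title') for m in movies[:3]])  # first three movie titles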
Fetching the first ten pages
We already have the url of the first page.
Clear all requests in the Network panel, then scroll down to trigger the request for the next page.
Repeating this collects several urls:
# https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&
# start=0&limit=20
# https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&
# start=20&limit=20
# https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&
# start=40&limit=20
Three are enough to see the pattern: start increases by 20 per page while everything else stays fixed.
import urllib.request
import urllib.parse
def create_request(page):
    base_url = 'https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&'
    data = {
        'start': (page-1) * 20,
        'limit': 20
    }
    data = urllib.parse.urlencode(data)
    url = base_url + data
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
    }
    request = urllib.request.Request(url = url, headers = headers)
    return request
def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content
def down_load(page,content):
    # The page number is concatenated into the filename; both sides of + must be strings, hence str(page)
    with open('douban_' + str(page) + '.json','w',encoding = 'utf-8') as fp:
        fp.write(content)
# The program entry point (the main guard)
if __name__ == '__main__':
    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
    # range() is half-open on the right, so add one to include end_page
    for page in range(start_page,end_page+1):
        # Customize a request object for each page
        request = create_request(page)
        # Get the response data
        content = get_content(request)
        # Save it
        down_load(page,content)
Requesting the KFC site (an Ajax POST request)
# X-Requested-With: XMLHttpRequest
# This header generally marks an ajax request; XMLHttpRequest is the core object behind ajax
# Pages 1 and 2 share the same url:
# https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname
# POST
# Only the form data differs between pages:
# cname: 北京
# pid:
# pageIndex: 1
# pageSize: 10
# cname: 北京
# pid:
# pageIndex: 2
# pageSize: 10
import urllib.request
import urllib.parse
import json
def create_request(page):
    base_url = 'https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname'
    data = {
        'cname': '北京',
        'pid': '',
        'pageIndex': page,
        'pageSize': '10',
    }
    # POST parameters must be url-encoded and then encoded to bytes
    data = urllib.parse.urlencode(data).encode('utf-8')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
    }
    request = urllib.request.Request(url = base_url,headers = headers,data = data)
    return request
def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content
def down_load(page,content):
    with open('KFC' + str(page) + '.json','w',encoding = 'utf-8') as fp:
        fp.write(content)
if __name__ == '__main__':
    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
    # add one so the end page is included (range is half-open on the right)
    for page in range(start_page,end_page+1):
        # Customize the request object
        request = create_request(page)
        # Get the response data
        content = get_content(request)
        # Save it
        down_load(page,content)
1.7 URLError & HTTPError
- HTTPError is a subclass of URLError (checked in the snippet after this list)
- Imports: urllib.error.HTTPError and urllib.error.URLError
- An HTTP error is the error response shown when the browser cannot reach the requested resource; it guides the visitor and tells them what went wrong.
- A request sent through urllib may fail. To make the code more robust, wrap it in try-except and catch the two exception classes, HTTPError and URLError.
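The class relationship, and the extra fields HTTPError carries, can be checked directly:
import urllib.error

print(issubclass(urllib.error.HTTPError, urllib.error.URLError))  # True
# HTTPError additionally exposes .code (the status) and .reason;
# because of the subclassing, an except clause for HTTPError must come before URLError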
HTTPError
import urllib.request
url = 'https://blog.csdn.net/Zombie166/article/details/1365403481'
# Using this very post as the example: the real url is https://blog.csdn.net/Zombie166/article/details/136540348; a 1 was appended at the end, which triggers the error below
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
request = urllib.request.Request(url = url,headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
import urllib.request
import urllib.error
url = 'https://blog.csdn.net/Zombie166/article/details/1365403481'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
try:
    request = urllib.request.Request(url = url,headers = headers)
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    print(content)
except urllib.error.HTTPError:
    print('Something went wrong; the system is being upgraded...')
URLError
import urllib.request
import urllib.error
# url = 'https://blog.csdn.net/Zombie166/article/details/1365403481'
# URLError usually means the host address or the parameters are wrong
url = 'http://nihao.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
try:
    request = urllib.request.Request(url = url,headers = headers)
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    print(content)
except urllib.error.HTTPError:
    # the more specific subclass must be caught first
    print('Something went wrong; the system is being upgraded...')
except urllib.error.URLError:
    print('The system is being upgraded...')
1.8 Logging in to Weibo with a Cookie
# Testing against a Weibo profile page
# Use case: during data collection, you need to get past the login and straight into a particular page
import urllib.request
url = 'https://weibo.com/u/5530696707'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
request = urllib.request.Request(url = url, headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
with open('weibo.html','w',encoding = 'utf-8') as fp:
    fp.write(content)
# The profile page is utf-8, yet decoding fails, because we never reach the profile page: the request is redirected to the login page
# The login page is not utf-8, hence the error
Inspecting the login page shows its encoding is gb2312.
# Testing against a Weibo profile page
# Use case: during data collection, you need to get past the login and straight into a particular page
# The login page's encoding is 'gb2312'
import urllib.request
url = 'https://weibo.com/u/5530696707'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}
request = urllib.request.Request(url = url, headers = headers)
response = urllib.request.urlopen(request)
content = response.read().decode('gb2312')
with open('weibo.html','w',encoding = 'gb2312') as fp:
    fp.write(content)
But this only reaches the login page, and we need to get past it.
# Testing against a Weibo profile page
# Use case: during data collection, you need to get past the login and straight into a particular page
# The login page's encoding is 'gb2312'
# Why does the request still fail?
# Because the request headers carry too little information
import urllib.request
url = 'https://weibo.cn/5530696707/info'
headers = {
# ':authority': 'weibo.cn',
# ':method': 'GET',
# ':path': '/5530696707/info',
# ':scheme': 'https',
# The colon-prefixed pseudo-headers (and Accept-Encoding) are not needed and stay commented out
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
# 'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
'Cookie': 'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9Wh.ybNaBNFIEk7mdjdN1dMm5JpX5KzhUgL.Fo-fe05c1KqNehM2dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMfSKe7So.cS05N; _T_WM=0e3ca79a756ae4e188d7cad69e8cef14; SUB=_2A25I62f4DeRhGeNL6FIX-SjLyzuIHXVrieUwrDV6PUJbkdANLWukkW1NSQx3dD0dsdxF2yCz-0vF05XlyEg2XKNV; SCF=AgVRogNKqvqwWzxIY91fCaj26nkvKrjEeIVdCTYUt9RrOF4CfhHcQDw0cu1nl68LZT1v-J-BxibPSDSAfFvnSDs.; SSOLoginState=1710167976',
# The Cookie carries your login state; with a Cookie captured after logging in, we can reach any page behind the login
'Referer': 'https://weibo.cn/',
# Referer tells the server which url (the previous page) the request came from; it is commonly used for image hotlink protection
'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Microsoft Edge";v="120"',
'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"Windows"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
request = urllib.request.Request(url = url, headers = headers)
response = urllib.request.urlopen(request)
# Pages behind the login are encoded as utf-8
content = response.read().decode('utf-8')
with open('weibo.html','w',encoding = 'utf-8') as fp:
    fp.write(content)
1.9 Handler processors
# urllib.request.urlopen(url)
#   cannot customize the request headers
# urllib.request.Request(url,headers,data)
#   can customize the request headers
# Handler
#   customizes requests at a higher level (as the business logic grows, a customized request object is no longer enough: dynamic cookies and proxies cannot be handled by Request alone)
# Use a Handler to fetch the Baidu page source
import urllib.request
url = 'http://www.baidu.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
request = urllib.request.Request(url = url,headers = headers)
# handler -> build_opener -> open
# 1. Create the handler object
handler = urllib.request.HTTPHandler()
# 2. Build the opener object
opener = urllib.request.build_opener(handler)
# 3. Call the open method
response = opener.open(request)
content = response.read().decode('utf-8')
print(content)
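As an example of the "dynamic cookie" case mentioned above, a CookieJar wired in through HTTPCookieProcessor collects cookies from responses and replays them automatically on later requests; a minimal sketch:
import http.cookiejar
import urllib.request

jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
response = opener.open('http://www.baidu.com')
for cookie in jar:
    print(cookie.name, cookie.value)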
1.10 Proxy servers
Common uses of proxies:
- Get around IP-based access restrictions and reach otherwise blocked sites
- Access internal resources of an organization or group
- Speed up access and hide the real IP
- Configured in code, as below
Setup
This uses a new class:
handler = urllib.request.ProxyHandler()
Ctrl-click ProxyHandler to view its definition; the proxies parameter is where the proxy goes.
Free proxies for testing can be found online, e.g. 快代理 (免费私密代理IP_IP代理_HTTP代理 - 快代理 (kuaidaili.com)); usage looks like this:
proxies = {
    # 'protocol':'ip:port'
    'http':'114.237.202.212:15646'
}
handler = urllib.request.ProxyHandler(proxies=proxies)
import urllib.request
url = 'https://cn.bing.com/search?q=ip'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
request = urllib.request.Request(url = url,headers = headers)
proxies = {
    # 'protocol':'ip:port'
    'http':'114.237.202.212:15646'
}
# handler -> build_opener -> open
handler = urllib.request.ProxyHandler(proxies=proxies)
opener = urllib.request.build_opener(handler)
response = opener.open(request)
content = response.read().decode('utf-8')
with open('daili.html','w',encoding = 'utf-8') as fp:
    fp.write(content)
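Free proxies die quickly, so in practice a small pool is rotated per request; a sketch with placeholder addresses (swap in live proxies before running):
import random
import urllib.request

proxies_pool = [
    {'http': '114.237.202.212:15646'},  # placeholder, replace with a live proxy
    {'http': '114.237.202.213:15646'},  # placeholder, replace with a live proxy
]
proxies = random.choice(proxies_pool)
handler = urllib.request.ProxyHandler(proxies = proxies)
opener = urllib.request.build_opener(handler)
response = opener.open('https://cn.bing.com/search?q=ip')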