Advanced usage of requests
Difference between HTTPS and HTTP
- https = http + SSL or TLS (SSL/TLS is the encryption layer, backed by a certificate)
- Note: a site whose certificate was not issued by a trusted CA shows up as "not secure" in the browser.
SSL verification (good to know)
- Skip certificate verification:
import requests

# Do not verify the certificate; no warning, the request just returns 200
respone = requests.get('https://www.12306.cn', verify=False)
print(respone.status_code)  # 200
- Manually carry a certificate:
import requests

# Carry the certificate and its private key with the request
respone = requests.get('https://www.12306.cn', cert=('/path/server.crt', '/path/key'))
print(respone.status_code)
Using proxies (important)
- Rate limiting:
- Too many requests get an account banned; sites throttle crawlers by IP or by user ID, so a crawler has to work around this.
- Using proxy IPs avoids getting the real IP banned.
- Registering many throwaway accounts also works around per-account limits.
- What is a proxy?
- Forward proxy: proxies for the client (the crawler).
- Reverse proxy: proxies for the server (e.g. nginx).
- Sending a request through a proxy IP:
import requests

proxies = {
    'http': '192.168.10.102:9003',
}
respon = requests.get('https://www.baidu.com', proxies=proxies)
print(respon.text)
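Note that requests picks the proxy entry by the scheme of the target URL, so a request to an https:// address only goes through the proxy if an 'https' key is present; with only the 'http' key above, the Baidu request is actually sent directly. A mapping that covers both schemes looks like this (the proxy address is just a placeholder):
import requests

# Placeholder proxy address; requests uses the entry whose key matches the target URL's scheme
proxies = {
    'http': 'http://192.168.10.102:9003',
    'https': 'http://192.168.10.102:9003',
}
res = requests.get('https://www.baidu.com', proxies=proxies)
print(res.status_code)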
Timeout settings
respone = requests.get('https://www.baidu23.com', timeout=3)
print(respone)  # the site does not exist, so the request times out and raises an error
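timeout can also be given as a (connect, read) tuple so that connecting and reading get separate limits; this is standard requests behavior:
import requests

# 3 seconds to establish the connection, 10 seconds to read the response
respone = requests.get('https://www.baidu.com', timeout=(3, 10))
print(respone.status_code)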
Exception handling
import requests
from requests.exceptions import *  # see requests.exceptions for the available exception types

try:
    r = requests.get('https://www.cnblogs.com/bjyxxc/p/16913776.html', timeout=0.0010)
except ReadTimeout:
    print('Read timed out')
except ConnectTimeout:
    print('Connect timed out')  # network unreachable
except Timeout:
    print('Timed out')
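ReadTimeout and ConnectTimeout are both subclasses of Timeout, and every requests error ultimately derives from RequestException, so a single catch-all handler is often enough:
import requests
from requests.exceptions import RequestException

try:
    r = requests.get('https://www.cnblogs.com/bjyxxc/p/16913776.html', timeout=0.0010)
except RequestException as e:
    print('Request failed:', e)  # covers timeouts, connection errors, invalid URLs, ...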
Uploading files
import requests
files = {'file': open('users.txt', 'rb')}  # 'file' is the key of the form field
respone = requests.post('http://httpbin.org/post', files=files)
print(respone.text)
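The value can also be a tuple so the filename and content type are set explicitly; this is standard requests usage:
import requests

# (filename, file object, content type)
files = {'file': ('users.txt', open('users.txt', 'rb'), 'text/plain')}
respone = requests.post('http://httpbin.org/post', files=files)
print(respone.json()['files'])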
Building a proxy pool
- An open-source proxy pool can be pulled from GitHub and run locally:
https://github.com/jhao104/proxy_pool
- Crawler part: it scrapes free proxy sites, collects free proxies, validates them, and saves the usable ones locally.
- Flask part: a small web backend where hitting one endpoint returns a random usable proxy address, as sketched below.
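The following is only a minimal sketch of that idea (a hypothetical in-memory list, not the actual proxy_pool implementation, which keeps validated proxies in Redis), to show what the /get interface conceptually does:
import random
from flask import Flask, jsonify

app = Flask(__name__)

# Hypothetical proxy list; the real project fills this from its crawlers and Redis
PROXIES = [
    {'proxy': '101.132.25.152:50001', 'https': False},
    {'proxy': '47.92.113.71:80', 'https': True},
]

@app.route('/get/')
def get_proxy():
    # Return one random proxy, in the same JSON shape used by the client code further down
    return jsonify(random.choice(PROXIES))

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)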
Setup steps
- Clone the repo:
git clone https://github.com/jhao104/proxy_pool.git
- Create a virtual environment and install the dependencies:
pip install -r requirements.txt
- Edit the config file settings.py:
# configure the API service
HOST = "0.0.0.0"  # IP
PORT = 5000       # listening port

# configure the database
DB_CONN = 'redis://127.0.0.1:6379/0'

# configure the ProxyFetcher
PROXY_FETCHER = [
    "freeProxy01",
    "freeProxy02",
]
- Start the crawler and the web service:
# start the scheduler
python proxyPool.py schedule
# start the web API service
python proxyPool.py server
- Fetch a random proxy:
127.0.0.1:5000/get
- Start the Redis service (the pool stores its proxies in Redis, per DB_CONN above).
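Assuming a default local install, Redis can be started with:
redis-server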
- Create a new client file to fetch and use a proxy:
import requests

# http://127.0.0.1:5000/get/ returns one random proxy as JSON
res = requests.get('http://127.0.0.1:5000/get/').json()
if res['https']:
    http = 'https'
else:
    http = 'http'
proxie = {
    http: res['proxy'],
}
print(proxie)
res = requests.get('https://www.cnblogs.com/liuqingzheng/p/16005896.html', proxies=proxie)
print(res.status_code)
Getting the client IP in a Django backend
- Write a Django view that returns the caller's IP:
def ip_test(request):
    # get the client's IP
    ip = request.META.get('REMOTE_ADDR')
    return HttpResponse('Your IP is: %s' % ip)
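When the Django app sits behind nginx or another reverse proxy, REMOTE_ADDR is the proxy's address; the usual workaround (assuming the proxy is configured to set the header) is to check X-Forwarded-For first:
from django.http import HttpResponse

def ip_test(request):
    # Behind a reverse proxy the real client IP is usually in X-Forwarded-For
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        ip = forwarded.split(',')[0].strip()  # first address is the original client
    else:
        ip = request.META.get('REMOTE_ADDR')
    return HttpResponse('Your IP is: %s' % ip)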
- Deploy it on a cloud server.
- Locally, send requests through a proxy with requests and check which IP the view reports:
import requests

res = requests.get('http://127.0.0.1:5010/get/').json()
if res['https']:
    http = 'https'
else:
    http = 'http'
proxie = {
    http: http + '://' + res['proxy'],
}
print(proxie)
- A service running only on the local machine cannot be reached through the proxy, so use an intranet tunnel or deploy it on a public server:
# res = requests.get('http://192.168.1.143:8000/ip/', proxies=proxie)
# res = requests.get('https://46b3k95600.zicp.fun/ip/', proxies=proxie)  # does not work
res = requests.get('http://101.133.225.166/ip/', proxies=proxie)
print(res.text)
- If the proxy turns out to be unusable, drop it and access the resource directly from the local IP; a simple fallback is sketched below.
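A minimal sketch of that fallback (the helper name is ours; the target URL is the demo view from above): try the proxy first and retry without it on failure.
import requests
from requests.exceptions import RequestException

def get_with_fallback(url, proxie):
    try:
        # Try through the proxy first, with a short timeout
        return requests.get(url, proxies=proxie, timeout=5)
    except RequestException:
        # Proxy unusable: fall back to a direct request from the local IP
        return requests.get(url, timeout=5)

res = get_with_fallback('http://101.133.225.166/ip/', proxie)
print(res.text)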
Crawling a video site
- requests only brings back raw HTML and cannot parse it by itself, so the re (regular expression) module is used to pull out what we need.
- Crawl the videos.
Code
import requests
import re

res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=4&start=20')
print(res.text)
# Use a regex to extract all the video page links that appear on this page
video_list = re.findall(' <a href="(.*?)" class="vervideo-lilink actplay">', res.text)
# print(video_list)
for video in video_list:
    # The page the video lives on:
    # video_url = 'https://www.pearvideo.com/' + video
    # print(video_url)
    # res = requests.get(video_url)
    # print(res.text)
    # Fetch the video itself and save it
    video_id = video.split('_')[-1]  # video id
    header = {
        'Referer': 'https://www.pearvideo.com/%s' % video,
    }
    print(video_id)
    # The video URL is not in the page HTML: the front end fetches it via ajax, and requests cannot run that JS,
    # so request the ajax endpoint the browser calls instead
    res1 = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.37325969925271907' % video_id,
                        headers=header).json()
    print(res1)
    real_mp4_url = res1['videoInfo']['videos']['srcUrl']
    # srcUrl contains a fake segment; replace it with 'cont-<video_id>' to get the real mp4 address
    real_mp4_url = real_mp4_url.replace(real_mp4_url.rsplit('/', 1)[-1].split('-')[0], 'cont-%s' % video_id)
    print(real_mp4_url)
    res = requests.get(real_mp4_url)
    with open('./video/%s.mp4' % video_id, 'wb') as f:
        for line in res.iter_content():
            f.write(line)
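For large mp4 files it is better to stream the download instead of loading the whole body into memory; stream=True and a chunk_size for iter_content are standard requests options (the mp4 URL below is a made-up placeholder for one obtained as above):
import requests

real_mp4_url = 'https://video.pearvideo.com/mp4/third/xxx/cont-1000000-12345-hd.mp4'  # placeholder
res = requests.get(real_mp4_url, stream=True)  # do not read the whole body eagerly
with open('./video/demo.mp4', 'wb') as f:
    for chunk in res.iter_content(chunk_size=1024 * 64):  # read 64 KB at a time
        f.write(chunk)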
Crawling news
- Use requests + BeautifulSoup4 (a parsing library for HTML/XML).
Code
import requests
# Parsing library: bs4  ->  pip install beautifulsoup4
from bs4 import BeautifulSoup

res = requests.get('https://www.autohome.com.cn/news/1/#liststart')
print(res.text)  # search within the returned HTML; bs4 parses HTML/XML
soup = BeautifulSoup(res.text, 'html.parser')
# Find all ul tags whose class is 'article'
ul_list = soup.find_all(name='ul', class_='article')
print(len(ul_list))  # 4 ul tags found
for ul in ul_list:
    # all li tags under this ul
    li_list = ul.find_all(name='li')
    for li in li_list:
        h3 = li.find(name='h3')
        if h3:  # only li tags that contain an h3 are articles
            title = h3.text
            desco = li.find(name='p').text
            url = 'https:' + li.find(name='a').attrs.get('href')
            img = li.find(name='img').attrs.get('src')
            if not img.startswith('http'):
                img = 'https:' + img
            print('''
            Article title: %s
            Article summary: %s
            Article URL: %s
            Article image: %s
            ''' % (title, desco, url, img))
            # Saving to MySQL: create the database and table, then pymysql + insert ==> conn.commit()
BeautifulSoup4 introduction
- Beautiful Soup is a Python library for extracting data from HTML or XML documents.
- Install:
pip install BeautifulSoup4
- Parsers:
BeautifulSoup('content to parse: an HTML/XML string', "html.parser")  # built-in parser
BeautifulSoup('content to parse: an HTML/XML string', "lxml")  # faster; requires: pip install lxml
bs4: traversing the document tree
from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" id='id_p' name='lqz' xx='yy'>lqz is handsome <b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
# Basic usage ==> dot access by tag name
res = soup.title
print(res)
res = soup.a
print(res)
res = soup.head.title
print(res)
# Get the tag's name
# Every tag we get back is an object: a Tag object, bs4.element.Tag
res = soup.head.title
print(res.name)
res = soup.body
print(res.name)
# Get the tag's text content
res = soup.p
print(res.text)  # concatenates the text of all descendants into one string
print(res.string)  # None: only returns text when the tag has no child tags
print(list(res.strings))  # generator yielding the text of each descendant
# Nested selection
res = soup.html.body.a
print(res.text)
# Children and descendants
print(soup.p.contents)  # all direct children of p
print(soup.p.children)  # an iterator over all direct children of p
# Parent and ancestors
print(soup.a.parent)  # the parent node of the a tag
print(list(soup.a.parents))  # all ancestors of the a tag: parent, parent's parent, ...
# Siblings
print(soup.a.next_sibling)  # next sibling
print(soup.a.previous_sibling)  # previous sibling
print('-----')
print(list(soup.a.next_siblings))  # all following siblings => generator
print('=====')
print(list(soup.a.previous_siblings))  # all preceding siblings => generator
Downloading videos with multiple threads
from threading import Thread
import requests
import re


# Each worker thread downloads its share of the video page links extracted by the regex below
def task(video_list):
    # print(video_list)
    for video in video_list:
        # print(video)
        # break
        # The page the video lives on:
        # video_url = 'https://www.pearvideo.com/' + video
        # print(video_url)
        # res = requests.get(video_url)
        # print(res.text)
        # Fetch the video itself and save it
        video_id = video.split('_')[-1]  # video id
        header = {
            'Referer': 'https://www.pearvideo.com/%s' % video,
            'Cookie': 'aliyungf_tc=83abed4abffbc2b9674b0fc3dc173f99a0902275195cb8ef0a04f511c55cfc85; JSESSIONID=2AE58096A977F7492D6B7D720B0C44FE; PEAR_UUID=ff4a40a4-f7fc-4992-b2f8-404db41719b8; _uab_collina=166928079680742123446416; Hm_lvt_9707bc8d5f6bba210e7218b8496f076a=1669280797; p_h5_u=FA253F14-C2C6-4D4F-89A0-38D2EED92EA9; acw_tc=2f6fc10116692941898981859e4c6454cffcf5eb0a8c95e6836ef49f734026; Hm_lpvt_9707bc8d5f6bba210e7218b8496f076a=1669294209; SERVERID=bacac21aafa9027952fdc46518c0c74f|1669294225|1669280766'
        }
        print(video_id)
        # The video URL is fetched by the front end via ajax and requests cannot run that JS,
        # so request the ajax endpoint the browser calls instead
        res1 = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.37325969925271907' % video_id,
                            headers=header).json()
        print(res1)
        # break
        real_mp4_url = res1['videoInfo']['videos']['srcUrl']
        real_mp4_url = real_mp4_url.replace(real_mp4_url.rsplit('/', 1)[-1].split('-')[0], 'cont-%s' % video_id)
        print(real_mp4_url)
        res = requests.get(real_mp4_url)
        with open('./video1/%s.mp4' % video_id, 'wb') as f:
            for line in res.iter_content():
                f.write(line)


if __name__ == '__main__':
    res2 = requests.get('http://127.0.0.1:5000/get/').json()
    if res2['https']:
        http = 'https'
    else:
        http = 'http'
    proxie = {
        http: res2['proxy']
    }
    res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=4&start=20',
                       proxies=proxie)
    # res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=4&start=20')
    # print(res.text)
    cookies = res.cookies
    video_list = re.findall(' <a href="(.*?)" class="vervideo-lilink actplay">', res.text)
    video_len = int(len(video_list) / 4)
    print(video_len)
    # Split the list into 4 chunks, one thread per chunk;
    # the last chunk also takes whatever is left over after integer division
    for i in range(4):
        if i == 3:
            t = Thread(target=task, args=(video_list[i * video_len:],))
        else:
            t = Thread(target=task, args=(video_list[i * video_len: (i + 1) * video_len],))
        t.start()
Crawling news into the database
import requests
# Parsing library: bs4  ->  pip install beautifulsoup4
from bs4 import BeautifulSoup
import pymysql

# 1. Connect to the MySQL server
conn = pymysql.connect(
    host='127.0.0.1',  # public or private IP
    port=3306,
    user='root',
    password='123',
    database='patu',
    charset='utf8mb4',
    autocommit=True  # automatically commit after insert/delete/update
)
res = requests.get('https://www.autohome.com.cn/news/1/#liststart')
# print(res.text)  # search within the returned HTML; bs4 parses HTML/XML
soup = BeautifulSoup(res.text, 'html.parser')
# Find all ul tags whose class is 'article'
ul_list = soup.find_all(name='ul', class_='article')
print(ul_list)
# print(len(ul_list))  # 4 ul tags found
for ul in ul_list:
    # all li tags under this ul
    li_list = ul.find_all(name='li')
    for li in li_list:
        h3 = li.find(name='h3')
        if h3:  # only li tags that contain an h3 are articles
            title = h3.text
            desco = li.find(name='p').text
            url = 'https:' + li.find(name='a').attrs.get('href')
            img = li.find(name='img').attrs.get('src')
            if not img.startswith('http'):
                img = 'https:' + img
            print('''
            Article title: %s
            Article summary: %s
            Article URL: %s
            Article image: %s
            ''' % (title, desco, url, img))
            # 2. Create a cursor (waits for commands)
            cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
            # 3. Build the SQL statement (parameterized, so quotes in titles do not break it)
            sql1 = "insert into patutu(title, desco, url, img) values (%s, %s, %s, %s);"
            # 4. Send it to the server
            rest = cursor.execute(sql1, (title, desco, url, img))
            # print(rest)  # number of affected rows
            conn.commit()
            # # 5. Fetch the result of a query
            # res = cursor.fetchall()
            # cursor.scroll(0, mode='absolute')
            # res = cursor.fetchone()
            # print(res)
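The insert assumes the database patu and a table patutu already exist; the original only names the four columns, so the column types below are an assumption. A minimal matching schema can be created with pymysql like this:
import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123',
                       database='patu', charset='utf8mb4')
cursor = conn.cursor()
# Assumed column types; adjust lengths as needed
cursor.execute('''
    create table if not exists patutu (
        id int primary key auto_increment,
        title varchar(255),
        desco varchar(512),
        url varchar(512),
        img varchar(512)
    ) charset=utf8mb4;
''')
conn.commit()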