
Python Web Scraping (4): Examples of Scraping Text, Images, and Video



The previous post covered the common Python scraping tools that let us quickly fetch, parse, and analyze data. Here we dissect several example crawler programs and study how they work; the examples come from the tutorial series at https://cuijiahua.com/blog/2020/04/spider-6.html, plus a few other small crawlers.

1. Text

Tabular data scraping (programming-language ranking)

#!/usr/bin/env python
#coding:utf-8
import requests
from requests.exceptions import RequestException
from lxml import etree
from lxml.etree import ParseError
import json

def one_to_page(url):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers)
        body = response.text  # fetch the page content
    except RequestException as e:
        print('request is error!', e)
        return
    try:
        html = etree.HTML(body, etree.HTMLParser())  # parse the HTML text
        result = html.xpath('//table[contains(@class,"table-top20")]/tbody/tr//text()')  # flatten the top-20 table cells into a list
        pos = 0
        for i in range(20):
            yield result[pos:pos+5]  # yield one ranking row (5 cells) at a time
            pos += 5
    except ParseError as e:
        print(e.position)


def write_file(data):   # reassemble each row into a dict, write it to a file, and print it
    for i in data:
        sul = {
            'Jun 2018 rank': i[0],
            'Jun 2017 rank': i[1],
            'Language': i[2],
            'Rating': i[3],
            'Change': i[4]
        }
        with open('test.txt', 'a', encoding='utf-8') as f:
            f.write(json.dumps(sul, ensure_ascii=False) + '\n')  # serialize as one JSON object per line
        print(sul)
    return None


def main():
    url = 'https://www.tiobe.com/tiobe-index/'
    data = one_to_page(url)
    retval = write_file(data)
    if retval is None:
        print('ok')
        
        
if __name__ == '__main__':
    main()
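
Since write_file appends one JSON object per line, the result file is a simple JSON-lines log that can be loaded back for further analysis. A minimal sketch of reading it back (assuming the test.txt and the column keys produced above):

import json

# read the JSON-lines file written by write_file() back into a list of dicts
rows = []
with open('test.txt', encoding='utf-8') as f:
    for line in f:
        rows.append(json.loads(line))

# for example, print the language names in ranked order
for row in rows:
    print(row['Language'])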

Plain-text data (a web novel):

import requests
import time
from tqdm import tqdm
from bs4 import BeautifulSoup

def get_content(target):
    req = requests.get(url = target)
    req.encoding = 'utf-8'
    html = req.text
    bf = BeautifulSoup(html, 'lxml')
    texts = bf.find('div', id='content')  # the chapter body lives in <div id="content">
    content = texts.text.strip().split('\xa0'*4)  # paragraphs are separated by four non-breaking spaces
    return content

if __name__ == '__main__':
    server = 'https://www.xsbiquge.com'
    book_name = '诡秘之主.txt'
    target = 'https://www.xsbiquge.com/15_15338/'
    req = requests.get(url = target)
    req.encoding = 'utf-8'
    html = req.text
    chapter_bs = BeautifulSoup(html, 'lxml')
    chapters = chapter_bs.find('div', id='list')   # the table of contents
    chapters = chapters.find_all('a')              # one <a> per chapter
    for chapter in tqdm(chapters):
        chapter_name = chapter.string
        url = server + chapter.get('href')         # chapter links are relative, so prepend the site root
        content = get_content(url)
        with open(book_name, 'a', encoding='utf-8') as f:
            f.write(chapter_name)
            f.write('\n')
            f.write('\n'.join(content))
            f.write('\n')
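
The loop above fires one request per chapter as fast as tqdm can iterate, and the time import is never actually used. If the site starts refusing connections, a small delay-and-retry wrapper helps; this is only a sketch, and fetch() is a hypothetical helper name, not part of the original script:

import time
import requests

def fetch(url, retries=3, delay=1.0):
    # hypothetical helper: retry a GET a few times and pause between attempts
    for attempt in range(retries):
        try:
            resp = requests.get(url, timeout=10)
            resp.encoding = 'utf-8'
            return resp.text
        except requests.RequestException:
            time.sleep(delay * (attempt + 1))  # back off a little more on each failure
    return ''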

2. Images

import requests
import os
import re
from bs4 import BeautifulSoup
from contextlib import closing
from tqdm import tqdm
import time

"""
    Author:
        Jack Cui
    Wechat:
        https://mp.weixin.qq.com/s/OCWwRVDFNslIuKyiCVUoTA
"""

# create the directory the comic will be saved into
save_dir = '妖神记'
if save_dir not in os.listdir('./'):
    os.mkdir(save_dir)

target_url = "https://www.dmzj.com/info/yaoshenji.html"

# fetch the chapter links and chapter names
r = requests.get(url = target_url)
bs = BeautifulSoup(r.text, 'lxml')
list_con_li = bs.find('ul', class_="list_con_li")
cartoon_list = list_con_li.find_all('a')
chapter_names = []
chapter_urls = []
for cartoon in cartoon_list:
    href = cartoon.get('href')
    name = cartoon.text
    # the page lists chapters newest-first, so insert at the front to restore reading order
    chapter_names.insert(0, name)
    chapter_urls.insert(0, href)

# download the comic chapter by chapter
for i, url in enumerate(tqdm(chapter_urls)):
    download_header = {
        'Referer': url   # the image host checks the Referer (hotlink protection)
    }
    name = chapter_names[i]
    # strip '.' so the chapter name can be used as a directory name
    while '.' in name:
        name = name.replace('.', '')
    chapter_save_dir = os.path.join(save_dir, name)
    if name not in os.listdir(save_dir):
        os.mkdir(chapter_save_dir)
        r = requests.get(url = url)
        html = BeautifulSoup(r.text, 'lxml')
        script_info = html.script
        # the image names are 13- or 14-digit numbers embedded in the page's <script> block
        pics = re.findall(r'\d{13,14}', str(script_info))
        for j, pic in enumerate(pics):
            if len(pic) == 13:
                pics[j] = pic + '0'   # pad 13-digit values so all names sort on the same scale
        pics = sorted(pics, key=lambda x: int(x))
        chapterpic_hou = re.findall(r'\|(\d{5})\|', str(script_info))[0]
        chapterpic_qian = re.findall(r'\|(\d{4})\|', str(script_info))[0]
        for idx, pic in enumerate(pics):
            if pic[-1] == '0':
                url = 'https://images.dmzj.com/img/chapterpic/' + chapterpic_qian + '/' + chapterpic_hou + '/' + pic[:-1] + '.jpg'
            else:
                url = 'https://images.dmzj.com/img/chapterpic/' + chapterpic_qian + '/' + chapterpic_hou + '/' + pic + '.jpg'
            pic_name = '%03d.jpg' % (idx + 1)
            pic_save_path = os.path.join(chapter_save_dir, pic_name)
            # stream the image to disk in 1 KB chunks
            with closing(requests.get(url, headers = download_header, stream = True)) as response:
                chunk_size = 1024
                if response.status_code == 200:
                    with open(pic_save_path, "wb") as file:
                        for data in response.iter_content(chunk_size=chunk_size):
                            file.write(data)
                else:
                    print('bad link:', url)
        time.sleep(10)   # pause between chapters to avoid hammering the server
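
The padding step above exists because the image names are numbers of mixed width (13 or 14 digits); comparing them as integers only gives the right order once they are on the same scale. A small illustration with made-up values:

# without padding, every 13-digit value sorts before every 14-digit value
pics = ['15320213674500', '1532021367450']            # same instant, different widths
print(sorted(pics, key=int))                          # the 13-digit value always comes first
print(sorted([p + '0' if len(p) == 13 else p for p in pics], key=int))  # padded: both compare on the same scale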

3. Video

import os
import ffmpy3
import requests
from bs4 import BeautifulSoup
from multiprocessing.dummy import Pool as ThreadPool

search_keyword = '越狱第一季'
search_url = 'http://www.jisudhw.com/index.php'
search_params = {
    'm': 'vod-search'
}
search_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36',
    'Referer': 'http://www.jisudhw.com/',
    'Origin': 'http://www.jisudhw.com',
    'Host': 'www.jisudhw.com'
}
search_datas = {
    'wd': search_keyword,
    'submit': 'search'
}


video_dir = ''

r = requests.post(url=search_url, params=search_params, headers=search_headers, data=search_datas)
r.encoding = 'utf-8'
server = 'http://www.jisudhw.com'
search_html = BeautifulSoup(r.text, 'lxml')
search_spans = search_html.find_all('span', class_='xing_vb4')   # each search hit sits in a span of this class
for span in search_spans:
    url = server + span.a.get('href')
    name = span.a.string
    print(name)
    print(url)
    video_dir = name
    if name not in os.listdir('./'):
        os.mkdir(name)

    # open the detail page and collect the m3u8 playlist URL of every episode
    detail_url = url
    r = requests.get(url = detail_url)
    r.encoding = 'utf-8'
    detail_bf = BeautifulSoup(r.text, 'lxml')
    num = 1
    search_res = {}   # note: rebuilt on every search hit, so only the last hit is downloaded below
    for each_url in detail_bf.find_all('input'):
        value = each_url.get('value')
        if value and 'm3u8' in value:   # guard against <input> tags without a value attribute
            url = value
            if url not in search_res.keys():
                search_res[url] = num
            print('Episode %03d:' % num)
            print(url)
            num += 1

def downVideo(url):
    num = search_res[url]
    name = os.path.join(video_dir, 'Episode %03d.mp4' % num)
    # let ffmpeg pull the m3u8 stream and remux it into an mp4 file
    ffmpy3.FFmpeg(inputs={url: None}, outputs={name: None}).run()

# run the downloads in a thread pool of 8 workers
pool = ThreadPool(8)
results = pool.map(downVideo, search_res.keys())
pool.close()
pool.join()
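
ffmpy3 here is just a thin wrapper that assembles and runs an ffmpeg command line, so each download is roughly equivalent to calling ffmpeg yourself. A minimal sketch of the same step with subprocess, assuming ffmpeg is on the PATH (the playlist URL and output name are placeholders):

import subprocess

m3u8_url = 'http://example.com/playlist.m3u8'   # placeholder playlist URL
out_path = 'Episode 001.mp4'

# roughly what ffmpy3.FFmpeg(inputs={m3u8_url: None}, outputs={out_path: None}).run() does
subprocess.run(['ffmpeg', '-i', m3u8_url, out_path], check=True)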
