一个比较简单的爬虫源码,爬取 gk-design 网站作品信息,包括图片及文字内容信息,几乎没有限制,适合新人学习练手使用,文末附上源码供参考学习。
小日子的网站随便爬,加大力度,使劲搞,适合 Python 爬虫新人练手使用和学习,如果你正在找练手网站,不妨尝试爬取下载数据。
这里分享几个简单的数据整理:
- 标题获取及处理 使用了 split() 函数获取标题内容数据。
h1=tree.xpath('//title/text()')[0]
h1=h1.split('|')[0]
h1=h1.strip()
pattern = r"[\/\\\:\*\?\"\<\>\|]"
h1=re.sub(pattern, "_", h1) # 替换为下划线
print(h1)
- 图片数据添加 使用了 insert() 函数添加图片到列表头部。
img=tree.xpath('//section[@class="cover"]/div/img/@data-src')[0]
#print(img)
imgs=tree.xpath('//section[@class="gallery"]/div[@class="frame"]/img/@data-src')
imgs.insert(0,img)
print(len(imgs))
print(imgs)
附完整源码参考:
# -*- coding: UTF-8 -*-
# @公众号:eryeji
# https://www.gk-design.co.jp/works/product-design/
import requests
from lxml import etree
import time
import random
import re
import threading
import os
def get_ua():
    """Return a randomly chosen desktop-browser User-Agent string.

    Rotating the UA across requests makes the scraper look less like a
    single automated client.
    """
    candidates = (
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36Chrome 17.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0Firefox 4.0.1',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
    )
    return random.choice(candidates)
def get_hrefs():
    """Crawl the product-design listing page and visit every work it links to.

    Fetches https://www.gk-design.co.jp/works/product-design/, extracts the
    per-work detail URLs from the works list, and calls get_detail() on each,
    sleeping between requests to throttle the crawl.

    Returns nothing; all output is side effects (prints, files written by
    get_detail).
    """
    url = "https://www.gk-design.co.jp/works/product-design/"
    # Use the shared retrying fetcher so the listing request gets the same
    # retry behavior as every other request in this script.
    response = get_resp(url)
    if response is None:
        # All retries failed — nothing to crawl.
        print(">> 获取列表页失败,退出。")
        return
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    hrefs = tree.xpath('//ul[@class="works_list list clearfix"]/li/a/@href')
    print(len(hrefs))
    print(hrefs)
    for href in hrefs:
        get_detail(href)
        time.sleep(3)  # be polite: pause between detail-page requests
def get_detail(href):
    """Scrape one work's detail page: save its text and download its images.

    Creates a folder named after the sanitized page title, writes the page
    text into <title>/<title>.txt, then downloads the cover image plus all
    gallery images into the same folder via down_imgs().

    Args:
        href: absolute URL of a work's detail page.
    """
    # Shared retrying fetcher instead of a bare requests.get — a transient
    # network error on one page no longer aborts the whole crawl.
    response = get_resp(href)
    if response is None:
        print(f">> 获取网页出错,跳过 {href}")
        return
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)

    titles = tree.xpath('//title/text()')
    if not titles:
        # No <title> means we can't name the output folder; skip the page
        # instead of raising IndexError.
        print(f">> 页面缺少标题,跳过 {href}")
        return
    # Keep the part before the site-name separator, then replace characters
    # that are illegal in Windows file names.
    h1 = titles[0].split('|')[0].strip()
    pattern = r"[\/\\\:\*\?\"\<\>\|]"
    h1 = re.sub(pattern, "_", h1)  # 替换为下划线
    print(h1)

    path = f'{h1}/'
    os.makedirs(path, exist_ok=True)
    print(f">> 生成保存目录 {h1} 文件夹成功!")

    ptexts = tree.xpath('//div[@class="row clearfix"]//text()')
    ptext = ''.join(ptexts)
    print(ptext)
    with open(f'{path}{h1}.txt', 'w', encoding='utf-8') as f:
        f.write(f'{h1}\n{ptext}')
    print(f">> 保存 {h1}.txt 文件成功!")

    # Cover image goes first, followed by the gallery images. Guard the
    # cover lookup — the original [0] index crashed on pages without one.
    covers = tree.xpath('//section[@class="cover"]/div/img/@data-src')
    imgs = tree.xpath('//section[@class="gallery"]/div[@class="frame"]/img/@data-src')
    if covers:
        imgs.insert(0, covers[0])
    print(len(imgs))
    print(imgs)
    down_imgs(path, imgs)
# Fetch a URL, retrying on network errors (up to 4 attempts in total).
def get_resp(url):
    """GET *url* with a random User-Agent, retrying on request errors.

    Each failed attempt backs off for attempt*2 seconds before retrying.

    Returns:
        The requests.Response on success, or None once all attempts fail
        (callers must check for None before touching .content).
    """
    attempt = 0
    while attempt < 4:
        try:
            headers = {
                "User-Agent": get_ua()
            }
            response = requests.get(url, headers=headers, timeout=10)
            print(response.status_code)
            return response
        except requests.exceptions.RequestException:
            attempt += 1
            # Report the real delay — the old message hard-coded "6S" while
            # the sleep below is attempt*2 seconds.
            print(f">> 获取网页出错,{attempt * 2}S后将重试获取第:{attempt} 次")
            time.sleep(attempt * 2)
    return None  # all retries exhausted
def down_imgs(path, imgs):
    """Download every image URL in *imgs* into *path*, one thread per image.

    Blocks until all downloads finish (each worker runs get_img).
    """
    workers = [
        threading.Thread(target=get_img, args=(path, url))
        for url in imgs
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print(f"恭喜,多线程下载图片完成!")
# 下载图片 (worker: download one image)
def get_img(path, img_url):
    """Download a single image into *path*, named after the URL's last segment.

    Args:
        path: destination directory, expected to end with '/'.
        img_url: direct URL of the image to fetch.
    """
    img_name = img_url.split('/')[-1]
    r = get_resp(img_url)
    if r is None:
        # All retries failed — skip this image instead of crashing the
        # worker thread on r.content (get_resp returns None on exhaustion).
        print(f">> {img_name}下载图片失败")
        return
    time.sleep(1)  # small delay to avoid hammering the image host
    with open(f'{path}{img_name}', 'wb') as f:
        f.write(r.content)
    print(f">> {img_name}下载图片成功")
def main():
    """Entry point: crawl the listing page and every linked work page."""
    get_hrefs()


if __name__ == '__main__':
    main()
往期推荐
微博爬虫,python微博用户主页小姐姐图片内容采集爬虫
图片爬虫,手把手教你Python多线程下载获取图片
Python下载爬虫,解析跳转真实链接下载文件
Python爬虫,B站视频下载源码脚本工具助手附exe
·················END·················
你好,我是二大爷,
革命老区外出进城务工人员,
互联网非早期非专业站长,
喜好python,写作,阅读,英语
不入流程序,自媒体,seo . . .
关注我的都变秃了
说错了,都变强了!
不信你试试
扫码关注最新动态
公众号ID:eryeji
标签:img,get,Python,h1,tree,爬虫,imgs,源码,print From: https://blog.51cto.com/u_15200177/8963607