如果代码不可用了,可能是因为header里面的cookie失效了,可以找图片中的修改试试
QQ截图20221227195909.png (771.86 KB, 下载次数: 0)
下载附件
2022-12-27 19:59 上传
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :wallhaven_demo.py
# @Time :2022/12/27 11:51
# @Author :Mr_Ren
import os
import time
import requests
from pyquery import PyQuery
# NOTE(review): these CUDA/TensorFlow env vars are never used by this script
# (it only does HTTP scraping with requests/pyquery) — presumably left over
# from another project; confirm before removing.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# os.environ["TF_CPP_MIN_LOG_LEVEL"]='1' # default verbosity: show all messages
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2' # suppress TF INFO and WARNING messages
# os.environ["TF_CPP_MIN_LOG_LEVEL"]='3' # would also suppress ERROR messages
# HTTP headers sent with every request. The cookie carries the wallhaven
# session and WILL expire — if requests start failing, replace it with a
# fresh value copied from a logged-in browser session.
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
"cookie":"_pk_id.1.01b8=a8be8d7f7ae37b1a.1672112746.; _pk_ses.1.01b8=1; XSRF-TOKEN=eyJpdiI6InJkYVFyZ0tJZHlUMmxKZCtDRjNhS0E9PSIsInZhbHVlIjoiR3paVG5KUnNOaTg1a3RmNHF6NXJqaFpsVHNUeXo5YWY2RzlDOFJ4b1dCeDdvV29CMDNPZ29zWWhqUld3TEgyUiIsIm1hYyI6ImQyM2EwZjQwYWU4YjM1N2E2ZDk2MTk1ZDZmMTY4YWQ2ZDVhZjRkZTE4NDM4ZDFmN2E1OGQ5YWU3NjQ1NzE4ODUifQ==; wallhaven_session=eyJpdiI6IndSZHJXYklNME00R25ScVFhNEN5cUE9PSIsInZhbHVlIjoiZGxJa290QkpkTmRaZ3NWZW5GbkR1aE8zZ1hMNE02bjFzWHJISkRBdGVqaW1XUUFFRGFLNDlcL2lQTlZpWVZnOFoiLCJtYWMiOiIyYTg4Y2MzODY2ZTEwYjE4ZjkyMzEzMjNjNDE3ZmNjNGZiYzhiMWY1NDdjYWJiZDA3YWM2ZWU2OTc5YjkwZjA0In0="
}
# Extra search query parameters, e.g. the resolution filter;
# other wallhaven search parameters can be added here as needed.
params = {
"resolutions":"1920x1080"
}
# Fetch one toplist search-result page and parse it.
def get_html_info(page):
    """Return the parsed HTML (PyQuery document) of a wallhaven search page.

    page: 1-based index of the toplist search-result page to fetch.
    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the server does not answer within 30 s.
    """
    # f-string builds the paged search URL; `params` (the resolution
    # filter) is merged by requests into the existing query string.
    url = (
        "https://wallhaven.cc/search"
        f"?categories=111&purity=100&topRange=1M&sorting=toplist&order=desc&page={page}"
    )
    # timeout prevents the crawl from hanging forever on a stalled connection
    resp = requests.get(url, headers=headers, params=params, timeout=30)
    # fail loudly instead of silently parsing an error/ban page
    resp.raise_for_status()
    return PyQuery(resp.content)
if __name__ == "__main__":
    # Number of result pages to crawl.
    count = 10
    # Sequential counter used as the local file name (1.jpg, 2.png, ...).
    cnt = 1
    # Create the output directory once, up front (was re-checked per image).
    os.makedirs("Wallhaven", exist_ok=True)
    # BUG FIX: wallhaven pages are 1-based; range(count) requested page 0.
    for page in range(1, count + 1):
        html = get_html_info(page)
        # Each <li> thumbnail's <a href> points at the wallpaper detail page.
        pic_urls = [
            item("a").attr("href")
            for item in html("#thumbs > .thumb-listing-page > ul > li").items()
        ]
        for pic_url in pic_urls:
            resp = requests.get(pic_url, headers=headers, timeout=30)
            detail = PyQuery(resp.content)
            # <img id="wallpaper"> src is the full-resolution image URL.
            final_url = detail("#wallpaper").attr("src")
            if not final_url:
                # Defensive: skip if the page layout changed or the
                # wallpaper was removed — requests.get(None) would crash.
                continue
            pic = requests.get(final_url, headers=headers, timeout=30).content
            # Remote file names are random letters, so save under the counter.
            # splitext handles any extension length (was final_url[-4:],
            # which breaks on e.g. ".jpeg").
            ext = os.path.splitext(final_url)[1] or ".jpg"
            with open("Wallhaven/" + str(cnt) + ext, "wb") as f:
                f.write(pic)
            print("第" + str(cnt) + "张图片下载完毕")
            cnt = cnt + 1
            # Throttle so the crawl doesn't trigger an IP ban.
            time.sleep(2)
    print("爬取结束")