Python crawler: premium photo sets from Meitulu, more than the body can handle

import requests
import re
from lxml import etree
from bs4 import BeautifulSoup
import random
import os
import time


def GetHtmlText(url):
    # Rotate through a small pool of User-Agent strings to look less like a bot
    ua = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
          'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
          'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
          'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
          'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)']
    Usera = random.choice(ua)
    headers = {'User-Agent': Usera,
               'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"}
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()                  # raise on 4xx/5xx responses
        r.encoding = r.apparent_encoding      # guess the real encoding from the body
        return r.text
    except requests.RequestException:         # bare except in the original; narrowed here
        return ""


# Enter the crawl interval (gallery item IDs) here
def main(a, b):
    try:
        for i in range(a, b):
            # Fetch page 4 of the gallery first, only to read the <title>,
            # which carries both the gallery name and the total page count
            url = "https://www.meitulu.com/item/%d_4.html" % i
            html = GetHtmlText(url)
            soup = BeautifulSoup(html, "html.parser")
            # Gallery title; the split delimiter was lost in the original post, '_' is an assumption
            name = soup.find('title').string.split('_')[0]
            # Page count: the digits after the '/' in the title
            maxpage = int(re.sub(r'\D', '', soup.find('title').string.split('/')[1]))
            print(name, maxpage)
            for j in range(1, maxpage + 1):
                if j <= 1:
                    Url = "https://www.meitulu.com/item/%d.html" % i
                else:
                    # "%d%d" in the original drops the underscore and builds a wrong URL
                    Url = "https://www.meitulu.com/item/%d_%d.html" % (i, j)
                Html = GetHtmlText(Url)
                Html_etree = etree.HTML(Html)
                Html_xpath = Html_etree.xpath('//center/img/@src')  # image URLs on this page
                os.chdir('D:/pics')           # base download directory (must already exist)
                if os.path.exists(name):  …
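The listing breaks off at the source right after the os.path.exists(name) check; what presumably follows is the per-gallery folder handling and the actual image downloads. Below is a minimal sketch of that missing tail as a standalone helper, not the author's original code: the name save_images, saving files under their URL basename, the Referer header, and the 0.5-second pause are all assumptions.

import os
import time

import requests


def save_images(name, img_urls, referer, base_dir='D:/pics'):
    # Hypothetical completion of the truncated loop: one folder per gallery
    # under base_dir, each image saved under its URL basename.
    folder = os.path.join(base_dir, name)
    os.makedirs(folder, exist_ok=True)  # covers both branches of os.path.exists(name)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
        # Image hosts often check the Referer to block hotlinking; sending the
        # gallery page URL is an assumption, not confirmed by the original post
        'Referer': referer,
    }
    for src in img_urls:
        path = os.path.join(folder, src.split('/')[-1])
        if os.path.exists(path):
            continue  # already downloaded on an earlier run
        try:
            r = requests.get(src, headers=headers, timeout=30)
            r.raise_for_status()
        except requests.RequestException:
            continue  # skip images that fail; keep crawling the rest
        with open(path, 'wb') as f:
            f.write(r.content)
        time.sleep(0.5)  # be polite between image requests

Inside the inner loop this would replace everything from os.chdir('D:/pics') onward with a single call, save_images(name, Html_xpath, Url). The script would then be kicked off with something like main(10000, 10010); those IDs are placeholders, since the post does not show the actual call.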