Scrape Center scraping platform: the ssr4 case


# Async scraping of the detail pages
import time
t1 = time.time()

import asyncio
import aiohttp
from lxml import etree

# Host assumed to be the standard ssr4 Scrape Center address; the original
# post only preserved the '/detail/{page}' path.
template = 'https://ssr4.scrape.center/detail/{page}'

async def get(session, queue):
    # Each worker keeps pulling page numbers until the queue is drained.
    while True:
        try:
            page = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        url = template.format(page=page)
        resp = await session.get(url, timeout=60, verify_ssl=False)
        r = await resp.text(encoding='utf-8')
        selector = etree.HTML(r)
        x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
        s2 = selector.xpath(x1)[0]
        print(page, s2)

async def main():
    async with aiohttp.ClientSession() as session:
        queue = asyncio.Queue()
        for page in range(1, 101):
            queue.put_nowait(page)
        # Ten concurrent workers share the one queue.
        tasks = [asyncio.create_task(get(session, queue)) for _ in range(10)]
        await asyncio.wait(tasks)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print(time.time() - t1)

I ran this async version N times and it kept failing with:
aiohttp.client_exceptions.ClientOSError: [WinError 64] The specified network name is no longer available.
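This ClientOSError usually means the remote end (or an intermediary) closed the connection, which tends to happen when too many simultaneous requests hit the server. A minimal sketch of the two usual mitigations, capping concurrent connections with a TCPConnector and retrying dropped requests; fetch_with_retry is a hypothetical helper, not part of the original code, and the host is assumed as above:

import asyncio
import aiohttp

async def fetch_with_retry(session, url, retries=3):
    # Retry a few times when the server drops the connection mid-flight.
    for attempt in range(retries):
        try:
            resp = await session.get(url, timeout=aiohttp.ClientTimeout(total=60))
            return await resp.text(encoding='utf-8')
        except (aiohttp.ClientOSError, asyncio.TimeoutError):
            await asyncio.sleep(1 + attempt)  # back off before retrying
    return None

async def main():
    # limit=10 caps simultaneous TCP connections, which is usually
    # what stops the server from resetting them.
    connector = aiohttp.TCPConnector(limit=10, ssl=False)
    async with aiohttp.ClientSession(connector=connector) as session:
        html = await fetch_with_retry(session, 'https://ssr4.scrape.center/detail/1')
        print(html is not None)

asyncio.run(main())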

# Multithreaded version
import time
start_time = time.time()

import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor

def download_detail(p):
    try:
        # Host assumed as above; the original only kept the path.
        url = f'https://ssr4.scrape.center/detail/{p}'
        r = requests.get(url, timeout=60)
        r.encoding = 'utf-8'
        selector = etree.HTML(r.text)
        x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
        a = selector.xpath(x1)[0]
        print(p, a)
    except Exception:
        print('error', p)

if __name__ == '__main__':
    # The with-block only exits once all submitted downloads have finished.
    with ThreadPoolExecutor(16) as t:
        for p in range(1, 101):
            t.submit(download_detail, p)
    print('done')
    print('Download took:', time.time() - start_time)
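If you want the extracted strings back rather than just printed, the same thread pool can return results via futures. A minimal sketch under the same assumptions (assumed host, hypothetical fetch_title helper in place of download_detail):

import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor, as_completed

XPATH = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'

def fetch_title(p):
    # Hypothetical helper: returns the extracted text instead of printing it.
    r = requests.get(f'https://ssr4.scrape.center/detail/{p}', timeout=60)
    r.encoding = 'utf-8'
    return p, etree.HTML(r.text).xpath(XPATH)[0]

results = {}
with ThreadPoolExecutor(16) as pool:
    futures = [pool.submit(fetch_title, p) for p in range(1, 101)]
    for fut in as_completed(futures):
        try:
            page, text = fut.result()
            results[page] = text
        except Exception:
            pass  # a failed page simply stays missing
print(len(results), 'pages collected')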
# Async scraping of the detail pages -- improved version
import time
t1 = time.time()

import asyncio
import aiohttp
from lxml import etree

template = 'https://ssr4.scrape.center/detail/{page}'  # host assumed as above

async def get(session, queue):
    while True:
        try:
            page = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        try:
            url = template.format(page=page)
            # Longer timeout, and a failed page no longer kills the worker.
            resp = await session.get(url, timeout=120)
            r = await resp.text(encoding='utf-8')
            selector = etree.HTML(r)
            x1 = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'
            s2 = selector.xpath(x1)[0]
            print(page, s2)
        except Exception:
            print('error:', page)

async def main():
    async with aiohttp.ClientSession() as session:
        queue = asyncio.Queue()
        for page in range(1, 101):
            queue.put_nowait(page)
        # One worker per page (101 workers for 100 pages, as in the original).
        tasks = [asyncio.create_task(get(session, queue)) for _ in range(101)]
        await asyncio.wait(tasks)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print(time.time() - t1)
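For reference, the get_event_loop/run_until_complete pattern above is the legacy style; a sketch of the same worker-queue approach in current asyncio style (asyncio.run and gather, host assumed as above):

import asyncio
import aiohttp
from lxml import etree

XPATH = '//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()'

async def worker(session, queue):
    # Drain page numbers from the shared queue until it is empty.
    while True:
        try:
            page = queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        try:
            resp = await session.get(f'https://ssr4.scrape.center/detail/{page}',
                                     timeout=aiohttp.ClientTimeout(total=120))
            html = await resp.text(encoding='utf-8')
            print(page, etree.HTML(html).xpath(XPATH)[0])
        except Exception:
            print('error:', page)

async def main():
    queue = asyncio.Queue()
    for page in range(1, 101):
        queue.put_nowait(page)
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*(worker(session, queue) for _ in range(10)))

asyncio.run(main())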
