Meme Image Crawler

1. Synchronous crawler

import requests
from lxml import etree
from urllib import request
import os
import re
import time


def get_page_source(link):
    headers = {
        'Referer': '/?page=23',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
        'Cookie': '__cfduid=d74bb1bdede33ae5fa88970198604232f1570874777; XSRF-TOKEN=eyJpdiI6IjE3ZUNSS1VJWXp2MzRENEhOdmlPSXc9PSIsInZhbHVlIjoiUFBVM25OSVBhZDRsSEhheGhGaVpLdFgyWU1TUmdoUGY2TFFxQ0ZkQUZvNjBONW94MmtmdDVHTEZ0TmMzWW5GNyIsIm1hYyI6IjY4NTQ5Yjk0MDVlOGViMWI1NTA4YWYyODI1N2NhNGJhMWFjMWQwMjI5NTEyMGQ2NTlmYWUzNGI4ZmVhMzkzNjQifQ%3D%3D; doutula_session=eyJpdiI6ImxmeFwvcDR1UVR0OTcrOVFPbnM4eCtnPT0iLCJ2YWx1ZSI6IjV4c3liSTF2VUtBellnbHJhNWxjWWk1QmZnRllRR0wwYnRvZjFzeTNjMFJkWEZlcWZiTlA4aEVXRUh6OWZKV3giLCJtYWMiOiJiZGU2ZTFkOTFhMTkyNjFkYmUwMTU1MGFiMWY0MDgxNWQ3MzQ4MDBmNmE4NjEyMzc1ODFjMDRjYmM2NGYxYjk0In0%3D; UM_distinctid=16dbf6ee8e4417-0d482538bfe3688-14377a40-144000-16dbf6ee8e652a; CNZZDATA1256911977=144637179-1570873422-%7C1570878822; Hm_lvt_24b7d5cc1b26f24f256b6869b069278e=1570881136; Hm_lpvt_24b7d5cc1b26f24f256b6869b069278e=1570881170'
    }
    resp = requests.get(link, headers=headers)
    html = etree.HTML(resp.text)
    imgs = html.xpath("//div[@class='page-content text-center']//a//img[@class!='gif']")
    for img in imgs:
        img_url = img.get("data-original")  # image URL
        alt = img.get("alt")  # image name
        alt = re.sub(r'[*?。,?,\.,!!]', "", alt)  # strip characters that are illegal in filenames
        suffix = os.path.splitext(img_url)[-1]  # file extension
        # print(img_url, alt, suffix)
        filename = alt + suffix
        request.urlretrieve(img_url, 'images/' + filename)  # save the image
        # print(etree.tostring(img))


def main():
    for i in range(1, 20):
        url = '/?page={}'.format(i)
        get_page_source(url)


if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print(end - start)
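`urllib.request.urlretrieve` works here, but it offers no timeout, no custom headers, and no error handling, so one bad link can abort the whole crawl. Below is a minimal sketch of an alternative download helper built on requests; the function name `download_image`, the `save_dir` layout, and the error-handling choices are my assumptions, not part of the original script.

import os

import requests


def download_image(img_url, filename, save_dir='images', timeout=10):
    # Sketch only: a more defensive replacement for urlretrieve.
    os.makedirs(save_dir, exist_ok=True)  # create the target folder if needed
    try:
        resp = requests.get(img_url, timeout=timeout, stream=True)
        resp.raise_for_status()  # surface HTTP errors instead of saving an error page
        with open(os.path.join(save_dir, filename), 'wb') as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
    except requests.RequestException as e:
        print('failed:', img_url, e)  # skip this file rather than abort the whole crawl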

2. Asynchronous mode

Multithreading usage reference: .html
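Before the full crawler, here is the pattern it relies on in miniature: a `queue.Queue` shared between a producer thread and a consumer thread, with no explicit locking, because Queue's `put()` and `get()` are internally synchronized. All names in this sketch are illustrative.

import threading
from queue import Queue

q = Queue()


def producer():
    for i in range(5):
        q.put(i)  # put() is internally locked; no explicit lock is needed


def consumer():
    for _ in range(5):
        item = q.get()  # get() blocks until an item is available
        print('got', item)


t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
t1.join()
t2.join()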


import re
import time
from lxml import etree
import requests
import os
from urllib import request
from queue import Queue
import threading

'''
Queue is thread-safe, so the worker threads can share it without extra locking.
'''


class Get_Link(threading.Thread):
    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self):
        while True:
            if self.page_queue.empty():  # empty means all queued pages have been claimed
                break
            url = self.page_queue.get()  # URL of one listing page
            self.get_link(url)  # extract every image link on the page and enqueue it

    def get_link(self, url):
        headers = {
            'Referer': '/?page=23',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
            'Cookie': '__cfduid=d74bb1bdede33ae5fa88970198604232f1570874777; XSRF-TOKEN=eyJpdiI6IjE3ZUNSS1VJWXp2MzRENEhOdmlPSXc9PSIsInZhbHVlIjoiUFBVM25OSVBhZDRsSEhheGhGaVpLdFgyWU1TUmdoUGY2TFFxQ0ZkQUZvNjBONW94MmtmdDVHTEZ0TmMzWW5GNyIsIm1hYyI6IjY4NTQ5Yjk0MDVlOGViMWI1NTA4YWYyODI1N2NhNGJhMWFjMWQwMjI5NTEyMGQ2NTlmYWUzNGI4ZmVhMzkzNjQifQ%3D%3D; doutula_session=eyJpdiI6ImxmeFwvcDR1UVR0OTcrOVFPbnM4eCtnPT0iLCJ2YWx1ZSI6IjV4c3liSTF2VUtBellnbHJhNWxjWWk1QmZnRllRR0wwYnRvZjFzeTNjMFJkWEZlcWZiTlA4aEVXRUh6OWZKV3giLCJtYWMiOiJiZGU2ZTFkOTFhMTkyNjFkYmUwMTU1MGFiMWY0MDgxNWQ3MzQ4MDBmNmE4NjEyMzc1ODFjMDRjYmM2NGYxYjk0In0%3D; UM_distinctid=16dbf6ee8e4417-0d482538bfe3688-14377a40-144000-16dbf6ee8e652a; CNZZDATA1256911977=144637179-1570873422-%7C1570878822; Hm_lvt_24b7d5cc1b26f24f256b6869b069278e=1570881136; Hm_lpvt_24b7d5cc1b26f24f256b6869b069278e=1570881170'
        }
        resp = requests.get(url, headers=headers)
        html = etree.HTML(resp.text)  # parse with XPath
        imgs = html.xpath("//div[@class='page-content text-center']//a//img[@class!='gif']")
        for img in imgs:
            img_url = img.get("data-original")  # image URL
            alt = img.get("alt")  # image name
            alt = re.sub(r'[*?。,?,\.,!!\/]', "", alt)  # strip characters that are illegal in filenames
            suffix = os.path.splitext(img_url)[-1]  # file extension
            # print(img_url, alt, suffix)
            filename = alt + suffix  # final filename
            self.image_queue.put((img_url, filename))  # enqueue as a (URL, filename) tuple


class Download_Image(threading.Thread):
    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self):
        # download the images
        start = time.time()
        while True:
            if self.image_queue.empty():
                if self.page_queue.empty():
                    end = time.time()
                    print(end - start)  # elapsed time
                    return
            img_url, filename = self.image_queue.get()  # take one (URL, filename) pair off the queue
            request.urlretrieve(img_url, 'images2/' + filename)  # download
            print('over')


def main():
    page_queue = Queue(100)  # queue of listing-page URLs
    image_queue = Queue(500)  # queue of (URL, filename) pairs
    for i in range(21, 40):
        url = '/?page={}'.format(i)  # download the images on pages 21-39
        page_queue.put(url)
    for x in range(5):  # five producer threads
        t = Get_Link(page_queue, image_queue)
        t.start()
    for x in range(5):  # five downloader threads
        t = Download_Image(page_queue, image_queue)
        t.start()


if __name__ == '__main__':
    main()
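One fragility in the shutdown logic above: a `Download_Image` thread exits as soon as both queues look empty, which can happen while a `Get_Link` thread is still parsing its last page, so trailing images may be skipped; conversely, a blocking `get()` on a queue that will never be refilled can hang a downloader. A common fix is to enqueue one sentinel per consumer after all real work is done. A minimal sketch of that design follows; the `SENTINEL` name and the `downloader` function are mine, not from the original.

import threading
from queue import Queue

SENTINEL = (None, None)  # marker telling a downloader thread to stop


def downloader(image_queue):
    while True:
        img_url, filename = image_queue.get()  # blocks until work (or a sentinel) arrives
        if (img_url, filename) == SENTINEL:
            return  # clean shutdown, no empty() race
        print('would download', img_url, 'as', filename)


image_queue = Queue()
workers = [threading.Thread(target=downloader, args=(image_queue,)) for _ in range(5)]
for w in workers:
    w.start()

image_queue.put(('http://example.com/a.jpg', 'a.jpg'))  # producers enqueue real work here

for _ in workers:
    image_queue.put(SENTINEL)  # one sentinel per worker, queued after all real work
for w in workers:
    w.join()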

The synchronous crawler takes about 120 s; the threaded version, with five worker threads of each kind, takes about 44 s, so the concurrent approach saves most of the wall-clock time.
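For comparison, the standard library's `concurrent.futures.ThreadPoolExecutor` expresses the same five-worker fan-out more compactly and manages thread startup and shutdown itself. A sketch only, with `crawl_page` as a hypothetical stand-in for the fetch-and-download work done by `Get_Link` and `Download_Image`:

import time
from concurrent.futures import ThreadPoolExecutor


def crawl_page(page):
    # hypothetical stand-in: fetch one listing page and save every image on it
    print('crawling page', page)


start = time.time()
with ThreadPoolExecutor(max_workers=5) as pool:
    pool.map(crawl_page, range(21, 40))  # same page range as the threaded version
print(time.time() - start)  # the with-block waits for all workers before this line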
