本文實例講述了Python基于分析Ajax請求實現抓取今日頭條街拍圖集功能。分享給大家供大家參考,具體如下:
代碼:
"""Scrape Toutiao '街拍' (street-snap) image galleries by replaying the
site's Ajax search requests; gallery metadata goes to MongoDB and the
images are downloaded to a local folder, deduplicated by MD5."""

import json
import os
import re
import time
from hashlib import md5
from multiprocessing import Pool

import requests
from requests.exceptions import RequestException
from pymongo import MongoClient

# --- Crawl configuration ---
OFFSET_START = 0      # first page offset to crawl
OFFSET_END = 20       # last page offset (exclusive)
KEYWORD = '街拍'      # search keyword

# --- MongoDB configuration ---
MONGO_URL = 'localhost'
MONGO_DB = 'toutiao'    # database name
MONGO_TABLE = 'jiepai'  # collection name

# Folder where downloaded images are stored
IMAGE_PATH = 'images'

headers = {
    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
}

client = MongoClient(host=MONGO_URL)
db = client[MONGO_DB]
jiepai_table = db[MONGO_TABLE]

# exist_ok avoids the check-then-create race of exists()/mkdir()
os.makedirs(IMAGE_PATH, exist_ok=True)


def get_html(url, params=None):
    """GET *url* and return the body text, or None on any failure.

    The explicit timeout keeps a dead server from hanging a worker;
    a timeout raises a RequestException subclass and is caught below.
    """
    try:
        response = requests.get(url, params=params, headers=headers,
                                timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException as e:
        print("請求%s失敗: " % url, e)
        return None


def get_index_page(offset, keyword):
    """Fetch one page of Ajax search results (JSON text) for *keyword*."""
    basic_url = 'http://www.toutiao.com/search_content/'
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': 20,
        'cur_tab': 3
    }
    return get_html(basic_url, params)


def parse_index_page(html):
    """Parse an index page; yield every contained detail-page URL."""
    if not html:
        return
    data = json.loads(html)
    if 'data' in data:
        for item in data['data']:
            article_url = item['article_url']
            # gallery articles live under toutiao.com/group
            if 'toutiao.com/group' in article_url:
                yield article_url


def get_detail_page(url):
    """Fetch the HTML of a single detail page."""
    return get_html(url)


def parse_detail_page(url, html):
    """Parse a detail page.

    Returns a dict with the page title, its url and the list of image
    urls it contains, or None when the page holds no gallery.  A page
    without a <title> no longer raises AttributeError: the title match
    is guarded and falls back to an empty string.
    """
    title_reg = re.compile(r'<title>(.*?)</title>')
    title_match = title_reg.search(html)
    title = title_match.group(1) if title_match else ''
    gallery_reg = re.compile(r'var gallery = (.*?);')
    gallery = gallery_reg.search(html)
    if gallery and 'sub_images' in gallery.group(1):
        images = json.loads(gallery.group(1))['sub_images']
        image_list = [image['url'] for image in images]
        return {
            'title': title,
            'url': url,
            'images': image_list
        }
    return None


def save_to_mongodb(content):
    """Insert one gallery record into the MongoDB collection."""
    # Collection.insert() was removed in pymongo 4; insert_one() is the
    # supported equivalent for a single document.
    jiepai_table.insert_one(content)
    print("存儲到mongdob成功", content)


def download_images(image_list):
    """Download every image URL in *image_list*; failures are logged."""
    for image_url in image_list:
        try:
            response = requests.get(image_url, timeout=10)
            if response.status_code == 200:
                save_image(response.content)
        except RequestException as e:
            print("下載圖片失敗: ", e)


def save_image(content):
    """Write image bytes to disk, named by their MD5 hash.

    Hashing the content both builds the filename and deduplicates:
    identical bytes always map to the same path, which is skipped
    when it already exists.
    """
    file_path = '{0}/{1}/{2}.{3}'.format(
        os.getcwd(), IMAGE_PATH, md5(content).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)


def jiepai(offset):
    """Crawl one index page: fetch, parse, store and download images."""
    html = get_index_page(offset, KEYWORD)
    if html is None:
        return
    page_urls = list(parse_index_page(html))
    for page in page_urls:
        print('get detail page:', page)
        html = get_detail_page(page)
        if html is None:
            continue
        content = parse_detail_page(page, html)
        if content:
            save_to_mongodb(content)
            download_images(content['images'])
        time.sleep(1)  # be polite: throttle requests to the site
    print('-------------------------------------')


if __name__ == '__main__':
    offset_list = range(OFFSET_START, OFFSET_END)
    pool = Pool()
    try:
        pool.map(jiepai, offset_list)
    finally:
        # release worker processes even if a task raises
        pool.close()
        pool.join()
新聞熱點
疑難解答