轉自:http://lizhenliang.blog.51cto.com/7876557/1872538
urllib.urlopen(url, data=None, proxies=None) | 讀取指定URL,創建類文件對象。data是隨著URL提交的數據(POST) |
urllib/urllib2.quote(s, safe='/') | |
urllib/urllib2.unquote(s) | 與quote相反 |
urllib.urlencode(query, doseq=0) | 將序列中的兩個元素(元組或字典)轉換為URL查詢字符串 |
urllib.urlretrieve(url, filename=None, reporthook=None, data=None) | 將返回結果保存到文件,filename是文件名 |
urllib2.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False) | |
urllib2.urlopen(url, data=None, timeout=<object object>) | timeout 超時時間,單位秒 |
urllib2.build_opener(*handlers) | 構造opener |
urllib2.install_opener(opener) | 把新構造的opener安裝到默認的opener中,以后urlopen()會自動調用 |
urllib2.HTTPCookieProcessor(cookiejar=None) | Cookie處理器 |
urllib2.HTTPBasicAuthHandler | 認證處理器 |
urllib2.ProxyHandler | 代理處理器 |
方法 | 描述 |
getcode() | 獲取HTTP狀態碼 |
geturl() | 返回真實URL。有可能URL3xx跳轉,那么這個將獲得跳轉后的URL |
info() | 返回服務器返回的header信息。可以通過它的方法獲取相關值 |
next() | 獲取下一行,沒有數據拋出異常 |
read(size=-1) | 默認讀取所有內容。size正整數指定讀取多少字節 |
readline(size=-1) | 默認讀取下一行。size正整數指定讀取多少字節 |
readlines(sizehint=0) | 默認讀取所有內容,以列表形式返回。sizehint正整數指定讀取多少字節 |
1)簡單訪問URL
>>> import urllib, urllib2
>>> response = urllib.urlopen("http://www.baidu.com")  # 獲取的網站頁面源碼
>>> response.readline()
'<!DOCTYPE html>\n'
>>> response.getcode()
200
>>> response.geturl()
'http://www.baidu.com'
2)偽裝Chrome瀏覽器訪問
>>> user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
>>> header = {"User-Agent": user_agent}
>>> request = urllib2.Request("http://www.baidu.com", headers=header)  # 也可以通過request.add_header('User-Agent', 'Mozilla...')方式添加
>>> response = urllib2.urlopen(request)
>>> response.geturl()
'https://www.baidu.com/'
>>> print response.info()  # 查看服務器返回的header信息
Server: bfe/1.0.8.18
Date: Sat, 12 Nov 2016 06:34:54 GMT
Content-Type: text/html; charset=utf-8
Transfer-Encoding: chunked
Connection: close
Vary: Accept-Encoding
Set-Cookie: BAIDUID=5979A74F742651531360C08F3BE06754:FG=1; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
Set-Cookie: BIDUPSID=5979A74F742651531360C08F3BE06754; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
Set-Cookie: PSTM=1478932494; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
Set-Cookie: BDSVRTM=0; path=/
Set-Cookie: BD_HOME=0; path=/
Set-Cookie: H_PS_PSSID=1426_18240_17945_21118_17001_21454_21408_21394_21377_21525_21192; path=/; domain=.baidu.com
P3P: CP=" OTI DSP COR IVA OUR IND COM "
Cache-Control: private
Cxy_all: baidu+a24af77d41154f5fc0d314a73fd4c48f
Expires: Sat, 12 Nov 2016 06:34:17 GMT
X-Powered-By: Hphp
X-UA-Compatible: IE=Edge,chrome=1
Strict-Transport-Security: max-age=604800
BDPAGETYPE: 1
BDQID: 0xf51e0c970000d938
BDUSERID: 0
Set-Cookie: __bsi=12824513216883597638_00_24_N_N_3_0303_C02F_N_N_N_0; expires=Sat, 12-Nov-16 06:34:59 GMT; domain=www.baidu.com; path=/
3)POST方式提交數據
>>> post_data = {"loginform-username":"test","loginform-password":"123456"}
>>> response = urllib2.urlopen("http://home.51cto.com/index", data=(urllib.urlencode(post_data)))
>>> response.read()  # 登錄后網頁內容
>>> 
>>> urllib.urlencode(post_data)
'loginform-password=123456&loginform-username=test'
4)保存cookie到變量中
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, urllib2
import cookielib
# 實例化CookieJar對象來保存cookie
cookie = cookielib.CookieJar()
# 創建cookie處理器
handler = urllib2.HTTPCookieProcessor(cookie)
# 通過handler構造opener
opener = urllib2.build_opener(handler)
response = opener.open("http://www.baidu.com")
for item in cookie:
    print item.name, item.value
# python test.py
BAIDUID EB4BF619C95630EFD619B99C596744B0:FG=1
BIDUPSID EB4BF619C95630EFD619B99C596744B0
H_PS_PSSID 1437_20795_21099_21455_21408_21395_21377_21526_21190_21306
PSTM 1478936429
BDSVRTM 0
BD_HOME 0
5)保存cookie到文件
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, urllib2
import cookielib
cookie_file = 'cookie.txt'
# 保存cookie到文件
cookie = cookielib.MozillaCookieJar(cookie_file)
# 創建cookie處理器
handler = urllib2.HTTPCookieProcessor(cookie)
# 通過handler構造opener
opener = urllib2.build_opener(handler)
response = opener.open("http://www.baidu.com")
# 保存
cookie.save(ignore_discard=True, ignore_expires=True)  # ignore_discard默認是false,不保存將被丟失的。ignore_expires默認false,如果cookie存在,則不寫入。
# python test.py
# cat cookie.txt
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file!
Do not edit.
.baidu.com TRUE / FALSE 3626420835 BAIDUID 687544519EA906BD0DE5AE02FB25A5B3:FG=1
.baidu.com TRUE / FALSE 3626420835 BIDUPSID 687544519EA906BD0DE5AE02FB25A5B3
.baidu.com TRUE / FALSE H_PS_PSSID 1420_21450_21097_18560_21455_21408_21395_21377_21526_21192_20927
.baidu.com TRUE / FALSE 3626420835 PSTM 1478937189
www.baidu.com FALSE / FALSE BDSVRTM 0
www.baidu.com FALSE / FALSE BD_HOME 0
6)從文件中讀取cookie
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import cookielib
# 實例化對象
cookie = cookielib.MozillaCookieJar()
# 從文件中讀取cookie
cookie.load("cookie.txt", ignore_discard=True, ignore_expires=True)
# 創建cookie處理器
handler = urllib2.HTTPCookieProcessor(cookie)
# 通過handler構造opener
opener = urllib2.build_opener(handler)
# request = urllib2.Request("http://www.baidu.com")
response = opener.open("http://www.baidu.com")
7)使用代理服務器訪問URL
import urllib2
proxy_address = {"http": "http://218.17.252.34:3128"}
handler = urllib2.ProxyHandler(proxy_address)
opener = urllib2.build_opener(handler)
response = opener.open("http://www.baidu.com")
print response.read()
8)URL訪問認證
import urllib2
auth = urllib2.HTTPBasicAuthHandler()
# (realm, uri, user, passwd)
auth.add_password(None, 'http://www.example.com','user','123456')
opener = urllib2.build_opener(auth)
response = opener.open('http://www.example.com/test.html')
新聞熱點
疑難解答