urllib2 page download, method 1: the simplest approach
# -*- coding: utf-8 -*-
import urllib2
# Request the Baidu page directly
response = urllib2.urlopen('http://www.baidu.com')
# Get the status code; 200 means the page was fetched successfully
print response.getcode()
# Read the page content
con = response.read()
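If the request fails, urlopen raises an exception instead of returning a response object. A minimal sketch of catching that case (the error handling below is an addition for illustration, not part of the original note):

# -*- coding: utf-8 -*-
import urllib2

# Sketch: wrap urlopen so network errors and bad status codes are handled
# (this error handling is an illustrative addition, not from the original note)
try:
    response = urllib2.urlopen('http://www.baidu.com')
    print response.getcode()    # 200 means success
    print len(response.read())  # number of bytes downloaded
except urllib2.HTTPError as e:
    # the server answered with a non-success status code
    print 'HTTP error:', e.code
except urllib2.URLError as e:
    # the server could not be reached at all
    print 'failed to reach the server:', e.reason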
urllib2 page download, method 2: add data and an HTTP header
# -*- coding: utf-8 -*-
import urllib
import urllib2

url = 'http://www.baidu.com'
# Create a Request object
request = urllib2.Request(url)
# Attach form data; add_data expects an already url-encoded string,
# and attaching data turns the request into a POST
request.add_data(urllib.urlencode({'a': '1'}))
# Add an HTTP header so the request looks like it comes from a browser
request.add_header('User-Agent', 'Mozilla/5.0')
# Send the request and get the response
response = urllib2.urlopen(request)
urllib2 page download, method 3: add handlers for special scenarios
HTTPCookieProcessor    cookies
ProxyHandler           proxy access
HTTPSHandler           HTTPS (encrypted) access
HTTPRedirectHandler    URL redirects
The cookie case is shown below; the other handlers are installed the same way (see the proxy sketch after the cookie example).
# -*- coding: utf-8 -*-
import urllib2, cookielib

# Create a cookie container
cj = cookielib.CookieJar()
# Build an opener that stores and sends cookies
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# Install the opener so urllib2 uses it for every request
urllib2.install_opener(opener)
# Access the page with cookie support enabled
response = urllib2.urlopen('http://www.baidu.com')
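The other handlers follow the same build_opener / install_opener pattern. A minimal sketch using ProxyHandler, assuming a local HTTP proxy at 127.0.0.1:8080 (the proxy address is a placeholder, not from the original note):

# -*- coding: utf-8 -*-
import urllib2

# Sketch: route requests through an HTTP proxy
# (the proxy address below is a made-up placeholder)
proxy_handler = urllib2.ProxyHandler({'http': 'http://127.0.0.1:8080'})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)

# All subsequent urlopen calls now go through the proxy
response = urllib2.urlopen('http://www.baidu.com')
print response.getcode()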