import itertools
import json
import re
import sys
import time
from queue import Queue
from threading import Thread
from urllib.parse import quote_plus

import requests

import leveldb
URL_BASE = 'http://s.m.taobao.com/search?q={}&n=200&m=api4h5&style=list&page={}'
def url_get(url):
    """Fetch *url* over HTTP and return the response body as text.

    Sends a small set of browser-like headers (including a spoofed
    User-Agent) and gives up after a 5-second timeout; any network
    error propagates to the caller as a requests exception.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive',
        'DNT': '1',
        'User-Agent': 'Mozilla/12.0 (compatible; MSIE 8.0; Windows NT)',
    }
    response = requests.get(url, timeout=5, headers=headers)
    return response.text
def item_thread(cate_queue, db_cate, db_item):
    """Worker loop: pull category names off *cate_queue* and crawl their items.

    For each category, its state in *db_cate* progresses
    'waiting' -> 'crawling' -> 'OK'.  Every item found goes into *db_item*
    keyed by its numeric id (JSON value); sub-categories discovered in the
    result's navigation block are recorded in *db_cate* as 'waiting' so the
    feeder thread will enqueue them later.  Runs forever; intended as a
    Thread target.
    """
    while True:
        try:
            cate = cate_queue.get()
            # Skip categories another worker already finished.
            # leveldb's Get raises KeyError when the key is absent.
            try:
                already_done = db_cate.Get(cate.encode('utf-8')) == b'OK'
            except KeyError:
                already_done = False
            if already_done:
                # BUG FIX: the original formatted an undefined name `title`
                # here, raising NameError on every already-crawled category.
                print('cate-{} already exists ... Ignore'.format(cate))
                continue
            db_cate.Put(cate.encode('utf-8'), b'crawling')
            for item_page in itertools.count(1):
                url = URL_BASE.format(quote_plus(cate), item_page)
                # Retry each page up to 5 times before giving up on the category.
                for attempt in range(5):
                    try:
                        items_obj = json.loads(url_get(url))
                        break
                    except KeyboardInterrupt:
                        quit()
                    except Exception as e:
                        if attempt == 4:
                            raise e
                if len(items_obj['listItem']) == 0:
                    break  # past the last page of results
                for item in items_obj['listItem']:
                    item_obj = dict(
                        _id=int(item['itemNumId']),
                        name=item['name'],
                        price=float(item['price']),
                        query=cate,
                        # 'category' is sometimes the empty string; store 0 then.
                        category=int(item['category']) if item['category'] != '' else 0,
                        nick=item['nick'],
                        area=item['area'])
                    db_item.Put(str(item_obj['_id']).encode('utf-8'),
                                json.dumps(item_obj, ensure_ascii=False).encode('utf-8'))
                print('Get {} items from {}: {}'.format(len(items_obj['listItem']), cate, item_page))
                # Record newly discovered sub-categories, but never overwrite
                # the state of one we already know about.
                if 'nav' in items_obj:
                    for na in items_obj['nav']['navCatList']:
                        try:
                            db_cate.Get(na['name'].encode('utf-8'))
                        except KeyError:
                            db_cate.Put(na['name'].encode('utf-8'), b'waiting')
            db_cate.Put(cate.encode('utf-8'), b'OK')
            print(cate, 'OK')
        except KeyboardInterrupt:
            break
        except Exception as e:
            # Best-effort worker: log and move on to the next category.
            print('An {} exception occurred'.format(e))
def cate_thread(cate_queue, db_cate):
    """Feeder loop: repeatedly scan *db_cate* and enqueue unfinished categories.

    Every key whose stored state is not b'OK' is decoded and put on
    *cate_queue* for the worker threads; the scan repeats after a
    10-second pause.  Runs forever; intended as a Thread target.
    """
    while True:
        try:
            for raw_key, state in db_cate.RangeIter():
                if state == b'OK':
                    continue
                cate = raw_key.decode('utf-8')
                print('CateThread: put {} into queue'.format(cate))
                cate_queue.put(cate)
            time.sleep(10)
        except KeyboardInterrupt:
            break
        except Exception as e:
            # Keep the feeder alive on transient DB errors; just log.
            print('CateThread: {}'.format(e))
if __name__ == '__main__':
    # Persistent state: category crawl status and the scraped items.
    db_cate = leveldb.LevelDB('./taobao-cate')
    db_item = leveldb.LevelDB('./taobao-item')
    # Seed the category database with one starting query on first run.
    # leveldb's Get raises KeyError when the key is absent (the original
    # used a bare `except:`, which would also swallow real DB errors).
    orig_cate = '正装'
    try:
        db_cate.Get(orig_cate.encode('utf-8'))
    except KeyError:
        db_cate.Put(orig_cate.encode('utf-8'), b'waiting')
    # One feeder thread fills the queue; five worker threads drain it.
    cate_queue = Queue(maxsize=1000)
    cate_th = Thread(target=cate_thread, args=(cate_queue, db_cate))
    cate_th.start()
    item_th = [Thread(target=item_thread, args=(cate_queue, db_cate, db_item))
               for _ in range(5)]
    for item_t in item_th:
        item_t.start()
    cate_th.join()
# NOTE(review): removed non-code web-page residue ("share / related
# downloads" listing text scraped along with the script) that was pasted
# below this point and made the file unparseable as Python.