cat 1.sh get.sh get.py 
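# 1.sh: mirror the article pages, then hand the collected image URLs to get.py
# wget flags: -r recurse, -nc skip files already on disk, -np never ascend above /tupian/meinv/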
wget -r -nc -np http://m.qiwen.la/tupian/meinv/
#grep -oP '(?<=src=")http://waptp.bieke.cc:81/uploads/.*.jpg' * -r | grep -v lazysrc | awk -F"html:" '{print $2}'

days="0905"
cd m.qiwen.la/
find tupian/meinv/2017/"$days" | grep '\.html$' > ../aaa.txt
sed -i 's|^|http://m.qiwen.la/|' ../aaa.txt
mkdir img
# url.txt is produced by get.sh (below), run from the directory above m.qiwen.la/
sort ../url.txt | uniq | wc -l    # count the distinct image URLs collected
cp ../get.py ../url.txt img/
cd img/
python get.py
---------------------------------------------
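# get.sh: scrape the image URLs for every article listed in aaa.txt into url.txt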
while read -r line
do
    # first page of the article: keep the first matching image URL
    curl -s "$line" | grep -oP '(?<=src=")http://waptp.bieke.cc:81/uploads/.*\.jpg' | head -n 1 >> url.txt
    # follow-up pages are named foo_2.html, foo_3.html, ...; stop at the first one with no image
    for i in `seq 2 100`
    do
        url=`echo "$line" | sed "s/\.html$/_$i.html/"`
        img=`curl -s "$url" | grep -oP '(?<=src=")http://waptp.bieke.cc:81/uploads/.*\.jpg' | head -n 1`
        if [ -z "$img" ]; then
            break
        else
            echo "$img" >> url.txt
        fi
    done
done < aaa.txt
---------------------------------------------
# coding=utf-8
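# get.py: download every image listed in url.txt into the current directory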
import urllib2

def down_img(url):
    # spoof the Host and Referer headers so the image server treats this
    # as an on-site request and serves the file
    getHeaders = {
            'Host': 'waptp.bieke.cc:81',
            #'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3408.400 QQBrowser/9.6.12028.400',
            'Referer': 'http://m.qiwen.la/tupian/meinv/'
    }
    req = urllib2.Request(url, headers=getHeaders)
    res_data = urllib2.urlopen(req)
    res = res_data.read()
    name = url.split('/')[-1].replace("\n", "")  # last path segment, minus the newline from url.txt
    # print name
    fp = open(name, 'wb')
    fp.write(res)
    fp.close()

with open('url.txt') as f:
    for i in f.readlines():
        try:
            down_img(i)
        except Exception as e:
            print e
            continue
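
urllib2 is Python 2 only. A minimal Python 3 sketch of the same downloader,
assuming the same url.txt layout and headers as above (standard library
urllib.request only):

# coding=utf-8
import urllib.request

HEADERS = {
    'Host': 'waptp.bieke.cc:81',
    'Referer': 'http://m.qiwen.la/tupian/meinv/',
}

def down_img(url):
    # fetch the image with the spoofed headers and save it under its basename
    req = urllib.request.Request(url, headers=HEADERS)
    data = urllib.request.urlopen(req).read()
    with open(url.split('/')[-1], 'wb') as fp:
        fp.write(data)

with open('url.txt') as f:
    for line in f:
        url = line.strip()
        if url:
            try:
                down_img(url)
            except Exception as e:
                print(e)  # log the failure and keep going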
