# coding: utf-8
-from __future__ import unicode_literals, print_function
-import sys
import os
import re
import time
try:
response = request('get', url)
- if response.status_code in (200,):
+ if response.status_code in (200, ):
response = response.content
+ elif response.status_code in (404,):
+ logger.error("Doujinshi with id {0} cannot be found".format(id_))
+ return []
else:
logger.debug('Slow down and retry ({}) ...'.format(id_))
time.sleep(1)
return doujinshi_parser(str(id_))
except Exception as e:
- logger.warn('Error: {}, ignored'.format(str(e)))
+ logger.warning('Error: {}, ignored'.format(str(e)))
return None
html = BeautifulSoup(response, 'html.parser')
doujinshi_info = html.find('div', attrs={'id': 'info'})
title = doujinshi_info.find('h1').text
+ pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
subtitle = doujinshi_info.find('h2')
doujinshi['name'] = title
+ doujinshi['pretty_name'] = pretty_name
doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi_cover = html.find('div', attrs={'id': 'cover'})
- img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png|gif)$', doujinshi_cover.a.img.attrs['data-src'])
+ img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif)$',
+ doujinshi_cover.a.img.attrs['data-src'])
ext = []
for i in html.find_all('div', attrs={'class': 'thumb-container'}):
result = _get_title_and_id(response)
if not result:
- logger.warn('Not found anything of keyword {}'.format(keyword))
+ logger.warning('Not found anything of keyword {}'.format(keyword))
return result
return
doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
headers = ['id', 'doujinshi']
- logger.info('Search Result\n' +
+ logger.info('Search Result || Found %i doujinshis \n' % doujinshi_list.__len__() +
tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def search_parser(keyword, sorting, page, is_page_all=False):
    """Search doujinshis by keyword and collect their ids and titles.

    :param keyword: raw query string forwarded as the ``query`` parameter.
    :param sorting: sort order forwarded as the ``sort`` parameter.
    :param page: iterable of page numbers to fetch; a falsy value means page 1.
    :param is_page_all: when True, probe the API once to learn ``num_pages``
        and fetch every page instead of only those in ``page``.
    :return: list of ``{'id': ..., 'title': ...}`` dicts; titles longer than
        85 characters are truncated with a ``..`` suffix. Empty on no results.
    """
    # keyword = '+'.join([i.strip().replace(' ', '-').lower() for i in keyword.split(',')])
    result = []
    response = None
    if not page:
        page = [1]

    if is_page_all:
        # Probe the first result page to learn how many pages exist in total.
        url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
        init_response = request('get', url.replace('%2B', '+')).json()
        page = range(1, init_response['num_pages'] + 1)

    # e.g. "on page 3/12" when fetching everything, plain "on page 3" otherwise.
    total = '/{0}'.format(page[-1]) if is_page_all else ''
    # Tracks consecutive empty pages: two in a row aborts the whole search.
    not_exists_persist = False
    for p in page:
        logger.info('Searching doujinshis using keywords "{0}" on page {1}{2}'.format(keyword, p, total))

        # Bounded at 3 attempts per page. The original `while i < 3` never
        # advanced its counter, so a success path without a break could spin
        # forever; a for-loop with explicit breaks guarantees termination.
        for _attempt in range(3):
            try:
                # NOTE(review): this request line was truncated in the diff;
                # the 'page'/'sort' params are reconstructed from the probe
                # call above plus the `sorting` argument — confirm against
                # the SEARCH_URL API contract.
                url = request('get', url=constant.SEARCH_URL,
                              params={'query': keyword, 'page': p, 'sort': sorting}).url
                # The API escapes '+' as '%2B'; undo that before the real fetch.
                response = request('get', url.replace('%2B', '+')).json()
                break
            except Exception as e:
                # Matches the original behavior: give up on this page after
                # the first hard failure rather than hammering the server.
                logger.critical(str(e))
                response = None
                break

        if response is None or 'result' not in response:
            logger.warning('No result in response in page {}'.format(p))
            if not_exists_persist is True:
                break  # second empty page in a row: assume we ran off the end
            not_exists_persist = True
            continue

        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
            result.append({'id': row['id'], 'title': title})
        not_exists_persist = False

    if not result:
        logger.warning('No results for keywords {}'.format(keyword))

    return result