help='list or download your favorites.')
# page options
- parser.add_option('--page', type='int', dest='page', action='store', default=1,
- help='page number of search results')
- parser.add_option('--page-range', type='string', dest='page_range', action='store',
- help='page range of favorites. e.g. 1,2-5,14')
+ parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
+ help='all search results')
+ parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='',
+ help='page number of search results. e.g. 1,2-5,14')
parser.add_option('--sorting', dest='sorting', action='store', default='recent',
help='sorting of doujinshi (recent / popular / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week'])
if not options.is_download:
logger.warning('You do not specify --download option')
- doujinshis = favorites_parser(options.page_range)
+ doujinshis = favorites_parser(page=page_list)
+ elif options.keyword and options.page_all:
+ from nhentai.constant import LANGUAGE
+ if LANGUAGE:
+ logger.info('Using default language: {0}'.format(LANGUAGE))
+ options.keyword += ', language:{}'.format(LANGUAGE)
+ doujinshis = search_parser_all(options.keyword)
+
elif options.keyword:
from nhentai.constant import LANGUAGE
if LANGUAGE:
def search_parser(keyword, sorting, page):
    """Search doujinshi by keyword over one or more result pages.

    Args:
        keyword: search query string passed to the nhentai search API.
        sorting: result ordering ('recent', 'popular', 'popular-today', 'popular-week').
        page: list of page numbers to fetch; falsy means page 1 only.

    Returns:
        List of dicts with 'id' and 'title' keys (titles truncated to 85 chars).
    """
    result = []
    if not page:
        page = [1]

    for p in page:
        logger.info('Searching doujinshis using keywords "{0}" on page {1}'.format(keyword, p))

        # Retry up to 3 times on network/JSON errors; stop retrying on success.
        # (The original `while i < 5` / `while i < 3` loop never incremented its
        # counter nor broke after success, so a successful fetch looped forever.)
        response = None
        for attempt in range(3):
            try:
                url = request('get', url=constant.SEARCH_URL,
                              params={'query': keyword, 'page': p, 'sort': sorting}).url
                # The URL builder percent-encodes '+'; the API expects a literal '+'.
                response = request('get', url.replace('%2B', '+')).json()
                break
            except Exception as e:
                logger.critical(str(e))

        if response is None or 'result' not in response:
            logger.warning('No result in response in page {}'.format(p))
            continue

        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
            result.append({'id': row['id'], 'title': title})

    # Warn once, after all requested pages have been fetched, not per page.
    if not result:
        logger.warning('No results for keywords {}'.format(keyword))

    return result
+
+
+def search_parser_all(keyword):
+ logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
+
+ result = []
+
+ url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
+ init_response = request('get', url.replace('%2B', '+')).json()
+
+ for page in range(init_response['num_pages']):
+ try:
+ url = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page+1}).url
+ response = request('get', url.replace('%2B', '+')).json()
+
+ print('Obtained %d / %d pages.' % (page+1, init_response['num_pages']), end='\r')
+
+ except Exception as e:
+ logger.critical(str(e))
+
+ if 'result' not in response:
+ raise Exception('No result in response')
+
+ for row in response['result']:
title = row['title']['english']
title = title[:85] + '..' if len(title) > 85 else title
result.append({'id': row['id'], 'title': title})