diff --git a/nhentai/parser.py b/nhentai/parser.py
index fd08d33..190c3e4 100644
--- a/nhentai/parser.py
+++ b/nhentai/parser.py
@@ -1,7 +1,5 @@
 # coding: utf-8
-from __future__ import unicode_literals, print_function
 
-import sys
 import os
 import re
 import time
@@ -117,15 +115,18 @@ def doujinshi_parser(id_):
 
     try:
         response = request('get', url)
-        if response.status_code in (200,):
+        if response.status_code in (200, ):
             response = response.content
+        elif response.status_code in (404,):
+            logger.error("Doujinshi with id {0} cannot be found".format(id_))
+            return []
         else:
             logger.debug('Slow down and retry ({}) ...'.format(id_))
             time.sleep(1)
             return doujinshi_parser(str(id_))
 
     except Exception as e:
-        logger.warn('Error: {}, ignored'.format(str(e)))
+        logger.warning('Error: {}, ignored'.format(str(e)))
         return None
 
     html = BeautifulSoup(response, 'html.parser')
@@ -179,7 +180,7 @@ def old_search_parser(keyword, sorting='date', page=1):
 
     result = _get_title_and_id(response)
     if not result:
-        logger.warn('Not found anything of keyword {}'.format(keyword))
+        logger.warning('Not found anything of keyword {}'.format(keyword))
 
     return result
 
@@ -189,19 +190,26 @@ def print_doujinshi(doujinshi_list):
         return
     doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
     headers = ['id', 'doujinshi']
-    logger.info('Search Result\n' +
+    logger.info('Search Result || Found %i doujinshis \n' % doujinshi_list.__len__() +
                 tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
 
 
-def search_parser(keyword, sorting, page):
+def search_parser(keyword, sorting, page, is_page_all=False):
     # keyword = '+'.join([i.strip().replace(' ', '-').lower() for i in keyword.split(',')])
     result = []
     if not page:
         page = [1]
 
+    if is_page_all:
+        url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
+        init_response = request('get', url.replace('%2B', '+')).json()
+        page = range(1, init_response['num_pages']+1)
+
+    total = '/{0}'.format(page[-1]) if is_page_all else ''
     for p in page:
         i = 0
-        logger.info('Searching doujinshis using keywords "{0}" on page {1}'.format(keyword, p))
+
+        logger.info('Searching doujinshis using keywords "{0}" on page {1}{2}'.format(keyword, p, total))
         while i < 3:
             try:
                 url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
@@ -213,7 +221,7 @@ def search_parser(keyword, sorting, page):
                 break
 
             if 'result' not in response:
-                logger.warn('No result in response in page {}'.format(p))
+                logger.warning('No result in response in page {}'.format(p))
                 break
 
             for row in response['result']:
@@ -222,7 +230,7 @@ def search_parser(keyword, sorting, page):
                 result.append({'id': row['id'], 'title': title})
 
     if not result:
-        logger.warn('No results for keywords {}'.format(keyword))
+        logger.warning('No results for keywords {}'.format(keyword))
 
     return result
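
The substantive change in doujinshi_parser() is the new elif branch: a 404 now logs an error and returns an empty result instead of falling into the sleep-and-retry recursion, which previously looped forever on ids that do not exist. (The blanket logger.warn -> logger.warning renames track the standard library, where Logger.warn has been a deprecated alias since Python 3.3, and dropping the __future__ imports goes with the project no longer supporting Python 2.) The snippet below is a minimal, self-contained sketch of the resulting dispatch, not the project's code: requests and the stdlib logger stand in for nhentai's request() helper and its logger, DETAIL_URL is an assumed stand-in for constant.DETAIL_URL, and the explicit retry cap is illustrative, since the patched code recurses without one.

    # Hedged sketch of the status-code dispatch added in this hunk.
    # Assumptions: DETAIL_URL mirrors constant.DETAIL_URL; requests and
    # logging replace the project's request() helper and logger.
    import logging
    import time

    import requests

    logger = logging.getLogger('sketch')
    DETAIL_URL = 'https://nhentai.net/g'   # assumption, not taken from the diff


    def fetch_detail_page(id_, retries=3):
        response = requests.get('{0}/{1}/'.format(DETAIL_URL, id_))
        if response.status_code == 200:
            return response.content        # hand the HTML to BeautifulSoup as before
        if response.status_code == 404:
            # New in this patch: a missing gallery is reported once and the
            # parser gives up, instead of retrying a page that will never exist.
            logger.error('Doujinshi with id {0} cannot be found'.format(id_))
            return []
        if retries <= 0:                   # illustrative cap; the diff recurses unboundedly
            return None
        logger.debug('Slow down and retry ({}) ...'.format(id_))
        time.sleep(1)
        return fetch_detail_page(id_, retries - 1)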
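
The search_parser() change works in two steps when is_page_all is set: one probe request discovers how many result pages the query has (the num_pages field of the JSON response), then the page list is replaced with range(1, num_pages + 1) so the existing per-page loop walks all of them; page[-1] also feeds the new '/{total}' suffix in the progress log. The .url round trip with '%2B' replaced by '+' re-encodes multi-term queries the way the API expects them. A sketch of just that expansion, under the same assumptions as above (requests in place of request(), SEARCH_URL assumed to match constant.SEARCH_URL):

    # Hedged sketch of the is_page_all page expansion; SEARCH_URL is an
    # assumption, and the response is expected to carry a 'num_pages'
    # field, as the diff implies.
    import requests

    SEARCH_URL = 'https://nhentai.net/api/galleries/search'   # assumption


    def pages_to_fetch(keyword, page=None, is_page_all=False):
        if not page:
            page = [1]
        if is_page_all:
            # Probe once to learn the page count, then queue every page.
            init_response = requests.get(SEARCH_URL, params={'query': keyword}).json()
            page = range(1, init_response['num_pages'] + 1)
        return page

For example, pages_to_fetch('full color', is_page_all=True) would return range(1, num_pages + 1), which is exactly the list the patched loop then iterates.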