# nhentai/serializer.py
# coding: utf-8
import json
import os
from xml.sax.saxutils import escape
from nhentai.constant import LANGUAGEISO

def serialize_json(doujinshi, dir):
    """Write the doujinshi's metadata to <dir>/metadata.json as compact JSON."""
    metadata = {'title': doujinshi.name,
                'subtitle': doujinshi.info.subtitle}
    if doujinshi.info.date:
        metadata['upload_date'] = doujinshi.info.date
    if doujinshi.info.parodies:
        metadata['parody'] = [i.strip() for i in doujinshi.info.parodies.split(',')]
    if doujinshi.info.characters:
        metadata['character'] = [i.strip() for i in doujinshi.info.characters.split(',')]
    if doujinshi.info.tags:
        metadata['tag'] = [i.strip() for i in doujinshi.info.tags.split(',')]
    if doujinshi.info.artists:
        metadata['artist'] = [i.strip() for i in doujinshi.info.artists.split(',')]
    if doujinshi.info.groups:
        metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
    if doujinshi.info.languages:
        metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')]
    metadata['category'] = doujinshi.info.categories
    metadata['URL'] = doujinshi.url
    metadata['Pages'] = doujinshi.pages

    with open(os.path.join(dir, 'metadata.json'), 'w') as f:
        json.dump(metadata, f, separators=(',', ':'))


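# Example (illustrative sketch, not part of the original module): for a
# doujinshi whose info fields carry comma-separated strings such as
# artists='artist a, artist b', serialize_json() writes a compact
# metadata.json along these lines (all values below are hypothetical):
#
#   {"title":"Example Title","subtitle":"Example Subtitle",
#    "artist":["artist a","artist b"],"language":["translated","english"],
#    "category":"doujinshi","URL":"https://nhentai.net/g/...","Pages":24}
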
def serialize_comicxml(doujinshi, dir):
    """Write a ComicInfo.xml metadata file for the doujinshi into <dir>."""
    # iso8601 is imported lazily; it is only used to parse the upload date below
    from iso8601 import parse_date
    with open(os.path.join(dir, 'ComicInfo.xml'), 'w') as f:
        f.write('<?xml version="1.0" encoding="utf-8"?>\n')
        f.write('<ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
                'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')

        xml_write_simple_tag(f, 'Manga', 'Yes')

        xml_write_simple_tag(f, 'Title', doujinshi.name)
        xml_write_simple_tag(f, 'Summary', doujinshi.info.subtitle)
        xml_write_simple_tag(f, 'PageCount', doujinshi.pages)
        xml_write_simple_tag(f, 'URL', doujinshi.url)
        xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
        xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)

        xml_write_simple_tag(f, 'BlackAndWhite',
                             'No' if doujinshi.info.tags and 'full color' in doujinshi.info.tags else 'Yes')

        if doujinshi.info.date:
            dt = parse_date(doujinshi.info.date)
            xml_write_simple_tag(f, 'Year', dt.year)
            xml_write_simple_tag(f, 'Month', dt.month)
            xml_write_simple_tag(f, 'Day', dt.day)
        if doujinshi.info.parodies:
            xml_write_simple_tag(f, 'Series', doujinshi.info.parodies)
        if doujinshi.info.characters:
            xml_write_simple_tag(f, 'Characters', doujinshi.info.characters)
        if doujinshi.info.tags:
            xml_write_simple_tag(f, 'Tags', doujinshi.info.tags)
        if doujinshi.info.artists:
            xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in doujinshi.info.artists.split(',')]))
        # if doujinshi.info.groups:
        #     metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
        if doujinshi.info.languages:
            languages = [i.strip() for i in doujinshi.info.languages.split(',')]
            xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
            for i in languages:
                if i != 'translated' and i in LANGUAGEISO:
                    xml_write_simple_tag(f, 'LanguageISO', LANGUAGEISO[i])

        f.write('</ComicInfo>')


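# Example (illustrative sketch): the file produced above follows the flat
# ComicInfo.xml convention of simple one-line tags, roughly:
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
#    <Manga>Yes</Manga>
#    <Title>Example Title</Title>
#    <Translated>Yes</Translated>
#    <LanguageISO>en</LanguageISO>
#   </ComicInfo>
#
# The title is hypothetical, and the LanguageISO value depends on the
# LANGUAGEISO mapping in nhentai.constant ('en' here is only illustrative).
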
def xml_write_simple_tag(f, name, val, indent=1):
    """Write one indented, XML-escaped <name>value</name> element to f."""
    f.write('{}<{}>{}</{}>\n'.format(' ' * indent, name, escape(str(val)), name))


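# For instance (hedged illustration), xml_write_simple_tag(f, 'Title', 'A & B')
# writes ' <Title>A &amp; B</Title>\n': the value goes through
# xml.sax.saxutils.escape(), so '&', '<' and '>' cannot break the XML.
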
def merge_json():
    """Collect metadata.json from every sub-directory of the current directory,
    tagging each record with its folder name."""
    lst = []
    output_dir = "./"
    os.chdir(output_dir)
    doujinshi_dirs = next(os.walk('.'))[1]
    for folder in doujinshi_dirs:
        files = os.listdir(folder)
        if 'metadata.json' not in files:
            continue
        data_folder = os.path.join(output_dir, folder, 'metadata.json')
        with open(data_folder, 'r') as json_file:
            json_dict = json.load(json_file)
        json_dict['Folder'] = folder
        lst.append(json_dict)
    return lst


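# Example (illustrative sketch, hypothetical layout): given downloads such as
#
#   ./[artist] title one/metadata.json
#   ./[artist] title two/metadata.json
#
# merge_json() returns one dict per gallery, each extended with its folder,
# e.g. [{'title': 'title one', 'Folder': '[artist] title one'}, ...].
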
def serialize_unique(lst):
    """Return the de-duplicated parody/character/tag/artist/group values
    found across all metadata dicts in lst."""
    dictionary = {}
    parody = []
    character = []
    tag = []
    artist = []
    group = []
    for dic in lst:
        if 'parody' in dic:
            parody.extend(dic['parody'])
        if 'character' in dic:
            character.extend(dic['character'])
        if 'tag' in dic:
            tag.extend(dic['tag'])
        if 'artist' in dic:
            artist.extend(dic['artist'])
        if 'group' in dic:
            group.extend(dic['group'])
    dictionary['parody'] = list(set(parody))
    dictionary['character'] = list(set(character))
    dictionary['tag'] = list(set(tag))
    dictionary['artist'] = list(set(artist))
    dictionary['group'] = list(set(group))
    return dictionary


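# Example (hedged sketch):
#
#   serialize_unique([{'tag': ['glasses', 'full color']},
#                     {'tag': ['glasses'], 'artist': ['someone']}])
#
# returns {'parody': [], 'character': [], 'tag': ['glasses', 'full color'],
# 'artist': ['someone'], 'group': []}, with list order unspecified because
# duplicates are removed through set().
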
def set_js_database():
    """Dump the merged metadata and the unique tag index into data.js as two
    JavaScript variables, data and tags."""
    with open('data.js', 'w') as f:
        indexed_json = merge_json()
        unique_json = json.dumps(serialize_unique(indexed_json), separators=(',', ':'))
        indexed_json = json.dumps(indexed_json, separators=(',', ':'))
        f.write('var data = ' + indexed_json)
        f.write(';\nvar tags = ' + unique_json)
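

# The resulting data.js is plain JavaScript that an index page can pull in
# with a <script> tag; the values below are hypothetical:
#
#   var data = [{"title":"Example","Folder":"[artist] example","Pages":24}];
#   var tags = {"parody":[],"character":[],"tag":["full color"],"artist":[],"group":[]}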