# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - HangulSyllableType.txt
# - PropList.txt
# - ReadMe.txt
# - Scripts.txt
# - UnicodeData.txt
#
# Since these tables change infrequently, we just run this script
# out-of-line and check the generated unicode.rs file into git.

import fileinput, re, os, sys, operator
preamble = '''// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly

#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# Mapping taken from Table 12 of:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}
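# For example, a code point whose General_Category is Lu (uppercase letter)
# is also recorded under the derived groups LC (cased letter) and L (letter):
#
#   expanded_categories.get('Lu')  # ['LC', 'L']
#   expanded_categories.get('Zs')  # ['Z']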
# Grapheme cluster data
# taken from UAX#29, http://www.unicode.org/reports/tr29/
# These code points (ZWNJ and ZWJ) are excluded from the Control category.
# NOTE: CR and LF are also technically excluded, but for
# the sake of convenience we leave them in the Control group
# and manually check them in the appropriate place. This is
# still compliant with the implementation requirements.
grapheme_control_exceptions = set([0x200c, 0x200d])
# the Regional_Indicator category
grapheme_regional_indicator = [(0x1f1e6, 0x1f1ff)]
67 # "The following ... are specifically excluded" from the SpacingMark category
68 # http://www.unicode.org/reports/tr29/#SpacingMark
69 grapheme_spacingmark_exceptions = [(0x102b, 0x102c), (0x1038, 0x1038),
70 (0x1062, 0x1064), (0x1067, 0x106d), (0x1083, 0x1083), (0x1087, 0x108c),
71 (0x108f, 0x108f), (0x109a, 0x109c), (0x19b0, 0x19b4), (0x19b8, 0x19b9),
72 (0x19bb, 0x19c0), (0x19c8, 0x19c9), (0x1a61, 0x1a61), (0x1a63, 0x1a64),
73 (0xaa7b, 0xaa7b), (0xaa7d, 0xaa7d)]
# these are included in the SpacingMark category
grapheme_spacingmark_extra = set([0xe33, 0xeb3])
def fetch(f):
    # download the table from unicode.org if it is not cached locally
    if not os.path.exists(f):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)

    if not os.path.exists(f):
        sys.stderr.write("cannot load %s\n" % f)
        sys.exit(1)
def is_valid_unicode(n):
    return 0 <= n <= 0xD7FF or 0xE000 <= n <= 0x10FFFF
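# i.e. n is a Unicode scalar value: any code point except the surrogate
# range U+D800..U+DFFF, which can never appear in a Rust `char`:
#
#   is_valid_unicode(0x61)    # True  ('a')
#   is_valid_unicode(0xd800)  # False (surrogate)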
def load_unicode_data(f):
    fetch(f)
    gencats = {}
    upperlower = {}
    lowerupper = {}
    combines = {}
    canon_decomp = {}
    compat_decomp = {}

    for line in fileinput.input(f):
        fields = line.split(";")
        if len(fields) != 15:
            continue
        [code, name, gencat, combine, bidi,
         decomp, deci, digit, num, mirror,
         old, iso, upcase, lowcase, titlecase ] = fields
        code_org = code
        code     = int(code, 16)

        # skip characters we cannot handle
        if not is_valid_unicode(code):
            continue

        # generate direct char-to-char simple case conversions
        # uppercase to lowercase
        if gencat == "Lu" and lowcase != "" and code_org != lowcase:
            upperlower[code] = int(lowcase, 16)

        # lowercase to uppercase
        if gencat == "Ll" and upcase != "" and code_org != upcase:
            lowerupper[code] = int(upcase, 16)
        # store decomposition, if given
        if decomp != "":
            if decomp.startswith('<'):
                # compatibility decomposition: "<tag> xxxx yyyy ..."
                seq = []
                for i in decomp.split()[1:]:
                    seq.append(int(i, 16))
                compat_decomp[code] = seq
            else:
                # canonical decomposition: "xxxx yyyy ..."
                seq = []
                for i in decomp.split():
                    seq.append(int(i, 16))
                canon_decomp[code] = seq
        # place letter in categories as appropriate
        for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
            if cat not in gencats:
                gencats[cat] = []
            gencats[cat].append(code)
        # record combining class, if any
        if combine != "0":
            if combine not in combines:
                combines[combine] = []
            combines[combine].append(code)
    # generate Not_Assigned from Assigned
    gencats["Cn"] = gen_unassigned(gencats["Assigned"])
    # Assigned is not a real category
    del gencats["Assigned"]
    # Other contains Not_Assigned
    gencats["C"].extend(gencats["Cn"])
    gencats = group_cats(gencats)
    combines = to_combines(group_cats(combines))

    return (canon_decomp, compat_decomp, gencats, combines, lowerupper, upperlower)
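# The returned tables are range-compressed: gencats maps each category name
# to a sorted list of inclusive (lo, hi) ranges, combines is a flat sorted
# list of (lo, hi, class) triples, and upperlower/lowerupper map individual
# code points to their simple case conversions.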
def group_cats(cats):
    cats_out = {}
    for cat in cats:
        cats_out[cat] = group_cat(cats[cat])
    return cats_out

def group_cat(cat):
    cat_out = []
    letters = sorted(set(cat))
    cur_start = letters.pop(0)
    cur_end = cur_start
    for letter in letters:
        assert letter > cur_end, \
            "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
        if letter == cur_end + 1:
            cur_end = letter
        else:
            cat_out.append((cur_start, cur_end))
            cur_start = cur_end = letter
    cat_out.append((cur_start, cur_end))
    return cat_out
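# group_cat collapses runs of consecutive code points into inclusive ranges:
#   group_cat([0x61, 0x62, 0x63, 0x41]) == [(0x41, 0x41), (0x61, 0x63)]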
def ungroup_cat(cat):
    # inverse of group_cat: flatten (lo, hi) ranges back into code points
    return [x for (lo, hi) in cat for x in range(lo, hi + 1)]
def gen_unassigned(assigned):
    assigned = set(assigned)
    return ([i for i in range(0, 0xd800) if i not in assigned] +
            [i for i in range(0xe000, 0x110000) if i not in assigned])
def to_combines(combs):
    combs_out = []
    for comb in combs:
        for (lo, hi) in combs[comb]:
            combs_out.append((lo, hi, comb))
    combs_out.sort(key=lambda comb: comb[0])
    return combs_out
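# e.g. to_combines({"230": [(0x300, 0x314)], "220": [(0x316, 0x319)]}) flattens
# the per-class ranges into one list sorted by range start:
#   [(0x300, 0x314, "230"), (0x316, 0x319, "220")]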
def format_table_content(f, content, indent):
    # write the comma-separated chunks, wrapping lines at ~98 columns
    line = " "*indent
    first = True
    for chunk in content.split(","):
        if len(line) + len(chunk) < 98:
            if first:
                line += chunk
            else:
                line += ", " + chunk
            first = False
        else:
            f.write(line + ",\n")
            line = " "*indent + chunk
    f.write(line)
def load_properties(f, interestingprops):
    fetch(f)
    props = {}
    re1 = re.compile(r"^([0-9A-F]+) +; (\w+)")
    re2 = re.compile(r"^([0-9A-F]+)\.\.([0-9A-F]+) +; (\w+)")

    for line in fileinput.input(f):
        prop = None
        d_lo = 0
        d_hi = 0
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            prop = m.group(2)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                prop = m.group(3)
            else:
                continue
        if interestingprops and prop not in interestingprops:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if prop not in props:
            props[prop] = []
        props[prop].append((d_lo, d_hi))

    return props
# load all code-point widths listed in want_widths, skipping categories in except_cats
def load_east_asian_width(want_widths, except_cats):
    f = "EastAsianWidth.txt"
    fetch(f)
    widths = {}
    re1 = re.compile(r"^([0-9A-F]+);(\w+) +# (\w+)")
    re2 = re.compile(r"^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)")

    for line in fileinput.input(f):
        width = None
        d_lo = 0
        d_hi = 0
        cat = None
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            width = m.group(2)
            cat = m.group(3)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                width = m.group(3)
                cat = m.group(4)
            else:
                continue
        if cat in except_cats or width not in want_widths:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if width not in widths:
            widths[width] = []
        widths[width].append((d_lo, d_hi))

    return widths
def escape_char(c):
    if c <= 0x7f:
        return "'\\x%2.2x'" % c
    if c <= 0xffff:
        return "'\\u%4.4x'" % c
    return "'\\U%8.8x'" % c
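# picks the shortest Rust character escape that can hold the code point, e.g.
#   escape_char(0x61)    -> "'\\x61'"
#   escape_char(0x300)   -> "'\\u0300'"
#   escape_char(0x1f1e6) -> "'\\U0001f1e6'"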
def emit_bsearch_range_table(f):
    f.write("""
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
    use core::cmp::Ordering::{Equal, Less, Greater};
    use core::slice::SlicePrelude;
    r.binary_search(|&(lo,hi)| {
        if lo <= c && c <= hi { Equal }
        else if hi < c { Less }
        else { Greater }
    }).found().is_some()
}\n
""")
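# For reference, the emitted Rust performs the same lookup as this Python
# sketch (illustrative only; the generator itself never calls it):
def _py_bsearch_range_table(c, table):
    import bisect
    # index of the last range whose low bound is <= c
    i = bisect.bisect_right([lo for (lo, hi) in table], c) - 1
    return i >= 0 and table[i][0] <= c <= table[i][1]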
def emit_table(f, name, t_data, t_type="&'static [(char, char)]", is_pub=True,
        pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
    pub_string = ""
    if is_pub:
        pub_string = "pub "
    f.write("    %sstatic %s: %s = &[\n" % (pub_string, name, t_type))
    data = ""
    first = True
    for dat in t_data:
        if not first:
            data += ","
        first = False
        data += pfun(dat)
    format_table_content(f, data, 8)
    f.write("\n    ];\n\n")
def emit_property_module(f, mod, tbl, emit_fn):
    f.write("pub mod %s {\n" % mod)
    for cat in sorted(tbl.keys()):
        emit_table(f, "%s_table" % cat, tbl[cat])
        # only emit a lookup function for the categories named in emit_fn
        if cat in emit_fn:
            f.write("    pub fn %s(c: char) -> bool {\n" % cat)
            f.write("        super::bsearch_range_table(c, %s_table)\n" % cat)
            f.write("    }\n\n")
    f.write("}\n\n")
def emit_regex_module(f, cats, w_data):
    f.write("pub mod regex {\n")
    regex_class = "&'static [(char, char)]"
    class_table = "&'static [(&'static str, &'static %s)]" % regex_class

    emit_table(f, "UNICODE_CLASSES", cats, class_table,
        pfun=lambda x: "(\"%s\",&super::%s::%s_table)" % (x[0], x[1], x[0]))

    f.write("    pub static PERLD: &'static %s = &super::general_category::Nd_table;\n\n"
        % regex_class)
    f.write("    pub static PERLS: &'static %s = &super::property::White_Space_table;\n\n"
        % regex_class)

    emit_table(f, "PERLW", w_data, regex_class)

    f.write("}\n\n")
def emit_conversions_module(f, lowerupper, upperlower):
    f.write("pub mod conversions {")
    f.write("""
    use core::cmp::Ordering::{Equal, Less, Greater};
    use core::slice::SlicePrelude;
    use core::tuple::Tuple2;
    use core::option::Option;
    use core::option::Option::{Some, None};
    use core::slice;

    pub fn to_lower(c: char) -> char {
        match bsearch_case_table(c, LuLl_table) {
            None        => c,
            Some(index) => LuLl_table[index].val1()
        }
    }

    pub fn to_upper(c: char) -> char {
        match bsearch_case_table(c, LlLu_table) {
            None        => c,
            Some(index) => LlLu_table[index].val1()
        }
    }

    fn bsearch_case_table(c: char, table: &'static [(char, char)]) -> Option<uint> {
        match table.binary_search(|&(key, _)| {
            if c == key { Equal }
            else if key < c { Less }
            else { Greater }
        }) {
            slice::Found(i) => Some(i),
            slice::NotFound(_) => None,
        }
    }

""")
    emit_table(f, "LuLl_table",
        sorted(upperlower.items(), key=operator.itemgetter(0)), is_pub=False)
    emit_table(f, "LlLu_table",
        sorted(lowerupper.items(), key=operator.itemgetter(0)), is_pub=False)
    f.write("}\n\n")
def emit_grapheme_module(f, grapheme_table, grapheme_cats):
    f.write("""pub mod grapheme {
    use core::slice::SlicePrelude;
    pub use self::GraphemeCat::*;
    use core::slice;

    #[allow(non_camel_case_types)]
    #[deriving(Clone)]
    pub enum GraphemeCat {
""")
    for cat in grapheme_cats + ["Any"]:
        f.write("        GC_" + cat + ",\n")
    f.write("""    }

    fn bsearch_range_value_table(c: char, r: &'static [(char, char, GraphemeCat)]) -> GraphemeCat {
        use core::cmp::Ordering::{Equal, Less, Greater};
        match r.binary_search(|&(lo, hi, _)| {
            if lo <= c && c <= hi { Equal }
            else if hi < c { Less }
            else { Greater }
        }) {
            slice::Found(idx) => {
                let (_, _, cat) = r[idx];
                cat
            }
            slice::NotFound(_) => GC_Any
        }
    }

    pub fn grapheme_category(c: char) -> GraphemeCat {
        bsearch_range_value_table(c, grapheme_cat_table)
    }

""")
    emit_table(f, "grapheme_cat_table", grapheme_table, "&'static [(char, char, GraphemeCat)]",
        pfun=lambda x: "(%s,%s,GC_%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]),
        is_pub=False)
    f.write("}\n")
def emit_charwidth_module(f, width_table):
    f.write("pub mod charwidth {\n")
    f.write("    use core::option::Option;\n")
    f.write("    use core::option::Option::{Some, None};\n")
    f.write("    use core::slice::SlicePrelude;\n")
    f.write("    use core::slice;\n")
    f.write("""
    fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
        use core::cmp::Ordering::{Equal, Less, Greater};
        match r.binary_search(|&(lo, hi, _, _)| {
            if lo <= c && c <= hi { Equal }
            else if hi < c { Less }
            else { Greater }
        }) {
            slice::Found(idx) => {
                let (_, _, r_ncjk, r_cjk) = r[idx];
                if is_cjk { r_cjk } else { r_ncjk }
            }
            slice::NotFound(_) => 1
        }
    }
""")

    f.write("""
    pub fn width(c: char, is_cjk: bool) -> Option<uint> {
        match c as uint {
            _c @ 0 => Some(0),          // null is zero width
            cu if cu < 0x20 => None,    // control sequences have no width
            cu if cu < 0x7F => Some(1), // ASCII
            cu if cu < 0xA0 => None,    // more control sequences
            _ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as uint)
        }
    }

""")
    f.write("    // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n")
    f.write("    //     http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n")
    emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False,
        pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3]))
    f.write("}\n\n")
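# The emitted width() follows wcwidth() conventions: NUL has width 0, other
# C0/C1 controls have no width (None), printable ASCII is width 1, and
# everything else is looked up in the table, where East Asian ambiguous-width
# characters resolve to 2 only when is_cjk is true.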
def emit_norm_module(f, canon, compat, combine, norm_props):
    canon_keys = sorted(canon.keys())
    compat_keys = sorted(compat.keys())

    # pre-compute the canonical compositions: map the first character of each
    # two-character canonical decomposition to its (second char, composed char)
    # pairs, skipping anything marked Full_Composition_Exclusion
    canon_comp = {}
    comp_exclusions = norm_props["Full_Composition_Exclusion"]
    for char in canon_keys:
        if any(lo <= char <= hi for (lo, hi) in comp_exclusions):
            continue
        decomp = canon[char]
        if len(decomp) == 2:
            if decomp[0] not in canon_comp:
                canon_comp[decomp[0]] = []
            canon_comp[decomp[0]].append( (decomp[1], char) )
    canon_comp_keys = sorted(canon_comp.keys())
    f.write("pub mod normalization {\n")

    def mkdata_fun(table):
        # build a "(char, &[decomposition chars])" entry for emit_table's pfun
        def fun(char):
            data = "(%s,&[" % escape_char(char)
            first = True
            for d in table[char]:
                if not first:
                    data += ","
                first = False
                data += escape_char(d)
            data += "])"
            return data
        return fun

    f.write("    // Canonical decompositions\n")
    emit_table(f, "canonical_table", canon_keys, "&'static [(char, &'static [char])]",
        pfun=mkdata_fun(canon))

    f.write("    // Compatibility decompositions\n")
    emit_table(f, "compatibility_table", compat_keys, "&'static [(char, &'static [char])]",
        pfun=mkdata_fun(compat))
    def comp_pfun(char):
        data = "(%s,&[" % escape_char(char)
        canon_comp[char].sort(key=lambda x: x[0])
        first = True
        for pair in canon_comp[char]:
            if not first:
                data += ","
            first = False
            data += "(%s,%s)" % (escape_char(pair[0]), escape_char(pair[1]))
        data += "])"
        return data

    f.write("    // Canonical compositions\n")
    emit_table(f, "composition_table", canon_comp_keys,
        "&'static [(char, &'static [(char, char)])]", pfun=comp_pfun)
    f.write("""
    fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 {
        use core::cmp::Ordering::{Equal, Less, Greater};
        use core::slice::SlicePrelude;
        use core::slice;
        match r.binary_search(|&(lo, hi, _)| {
            if lo <= c && c <= hi { Equal }
            else if hi < c { Less }
            else { Greater }
        }) {
            slice::Found(idx) => {
                let (_, _, result) = r[idx];
                result
            }
            slice::NotFound(_) => 0
        }
    }\n
""")

    emit_table(f, "combining_class_table", combine, "&'static [(char, char, u8)]", is_pub=False,
        pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]))

    f.write("    pub fn canonical_combining_class(c: char) -> u8 {\n"
        + "        bsearch_range_value_table(c, combining_class_table)\n"
        + "    }\n\n")
    f.write("}\n\n")
# remove a single code point from a sorted width table, splitting or shrinking
# the range that contains it as necessary
def remove_from_wtable(wtable, val):
    wtable_out = []
    while wtable:
        if wtable[0][1] < val:
            # range lies entirely below val; keep it as-is
            wtable_out.append(wtable.pop(0))
        elif wtable[0][0] > val:
            # all remaining ranges lie above val; done
            break
        else:
            # val falls inside this range
            (wt_lo, wt_hi, width, width_cjk) = wtable.pop(0)
            if wt_lo == wt_hi == val:
                continue
            elif wt_lo == val:
                wtable_out.append((wt_lo+1, wt_hi, width, width_cjk))
            elif wt_hi == val:
                wtable_out.append((wt_lo, wt_hi-1, width, width_cjk))
            else:
                wtable_out.append((wt_lo, val-1, width, width_cjk))
                wtable_out.append((val+1, wt_hi, width, width_cjk))
    wtable_out.extend(wtable)
    return wtable_out
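# e.g. removing the soft hyphen (0xad) from a covering range splits it in two:
#   remove_from_wtable([(0xa0, 0xff, 1, 1)], 0xad)
#   == [(0xa0, 0xac, 1, 1), (0xae, 0xff, 1, 1)]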
def optimize_width_table(wtable):
    wtable_out = []
    w_this = wtable.pop(0)
    while wtable:
        if w_this[1] == wtable[0][0] - 1 and w_this[2:] == wtable[0][2:]:
            # adjacent ranges with identical widths: merge them
            w_tmp = wtable.pop(0)
            w_this = (w_this[0], w_tmp[1], w_tmp[2], w_tmp[3])
        else:
            wtable_out.append(w_this)
            w_this = wtable.pop(0)
    wtable_out.append(w_this)
    return wtable_out
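# e.g. two adjacent ranges carrying the same widths collapse into one entry:
#   optimize_width_table([(0x20, 0x3f, 1, 1), (0x40, 0x7e, 1, 1)])
#   == [(0x20, 0x7e, 1, 1)]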
if __name__ == "__main__":
    r = "unicode.rs"
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)

        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            pattern = r"for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the `UnicodeChar` and `UnicodeStrSlice` traits are based on.
pub const UNICODE_VERSION: (uint, uint, uint) = (%s, %s, %s);
""" % unicode_version)
        (canon_decomp, compat_decomp, gencats, combines,
         lowerupper, upperlower) = load_unicode_data("UnicodeData.txt")
        want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase"]
        other_derived = ["Default_Ignorable_Code_Point", "Grapheme_Extend"]
        derived = load_properties("DerivedCoreProperties.txt", want_derived + other_derived)
        scripts = load_properties("Scripts.txt", [])
        props = load_properties("PropList.txt",
            ["White_Space", "Join_Control", "Noncharacter_Code_Point"])
        norm_props = load_properties("DerivedNormalizationProps.txt",
            ["Full_Composition_Exclusion"])
        # grapheme cluster categories: Extend comes straight from
        # DerivedCoreProperties; the rest are built below
        grapheme_cats = {}
        grapheme_cats["Extend"] = derived["Grapheme_Extend"]
        del derived["Grapheme_Extend"]
        # bsearch_range_table is used in all the property modules below
        emit_bsearch_range_table(rf)
        # all of these categories will also be available as \p{} in libregex
        allcats = []
        for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
                                  ("derived_property", derived, want_derived), \
                                  ("script", scripts, []), \
                                  ("property", props, ["White_Space"]):
            emit_property_module(rf, name, cat, pfuns)
            allcats.extend([(x, name) for x in cat])
        allcats.sort(key=lambda c: c[0])
        # the \w regex corresponds to Alphabetic + Mark + Decimal_Number +
        # Connector_Punctuation + Join_Control according to UTS#18
        # http://www.unicode.org/reports/tr18/#Compatibility_Properties
        perl_words = []
        for cat in derived["Alphabetic"], gencats["M"], gencats["Nd"], \
                   gencats["Pc"], props["Join_Control"]:
            perl_words.extend(ungroup_cat(cat))
        perl_words = group_cat(perl_words)
        # emit lookup tables for \p{}, along with \d, \w, and \s for libregex
        emit_regex_module(rf, allcats, perl_words)

        # normalizations and conversions module
        emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
        emit_conversions_module(rf, lowerupper, upperlower)
        ### character width module
        # enclosing marks, nonspacing marks, and format controls are zero width
        width_table = []
        for zwcat in ["Me", "Mn", "Cf"]:
            width_table.extend([(lo, hi, 0, 0) for (lo, hi) in gencats[zwcat]])
        # Hangul Jamo medial vowels and final consonants (U+1160..U+11FF) are zero width
        width_table.append((4448, 4607, 0, 0))

        # get widths, except those that are explicitly marked zero-width above
        ea_widths = load_east_asian_width(["W", "F", "A"], ["Me", "Mn", "Cf"])
        # these are doublewidth
        for dwcat in ["W", "F"]:
            width_table.extend([(lo, hi, 2, 2) for (lo, hi) in ea_widths[dwcat]])
        # ambiguous-width characters: single width normally, double width in CJK context
        width_table.extend([(lo, hi, 1, 2) for (lo, hi) in ea_widths["A"]])
        width_table.sort(key=lambda w: w[0])

        # soft hyphen is not zero width in preformatted text; it's used to indicate
        # a hyphen inserted to facilitate a linebreak, so drop it from the table and
        # let it fall through to the default width of 1
        width_table = remove_from_wtable(width_table, 173)

        # optimize the width table by collapsing adjacent entries when possible
        width_table = optimize_width_table(width_table)
        emit_charwidth_module(rf, width_table)
        ### grapheme cluster module
        # from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values
        # Hangul syllable categories
        want_hangul = ["L", "V", "T", "LV", "LVT"]
        grapheme_cats.update(load_properties("HangulSyllableType.txt", want_hangul))
        # Control
        # This category also includes Cs (surrogate code points), but Rust's `char`s are
        # Unicode Scalar Values only, and surrogates are thus invalid `char`s.
        grapheme_cats["Control"] = set()
        for cat in ["Zl", "Zp", "Cc", "Cf"]:
            grapheme_cats["Control"] |= set(ungroup_cat(gencats[cat]))
        grapheme_cats["Control"] = group_cat(list(
            grapheme_cats["Control"]
            - grapheme_control_exceptions
            | (set(ungroup_cat(gencats["Cn"]))
               & set(ungroup_cat(derived["Default_Ignorable_Code_Point"])))))
707 grapheme_cats["RegionalIndicator"] = grapheme_regional_indicator
        # Prepend - "Currently there are no characters with this value"
        # (from UAX#29, Unicode 7.0), so no Prepend table is emitted

        # SpacingMark: Mc characters that are not also Extend, plus the extra
        # inclusions, minus the exclusions listed above
        grapheme_cats["SpacingMark"] = group_cat(list(
            (set(ungroup_cat(gencats["Mc"]))
             - set(ungroup_cat(grapheme_cats["Extend"]))
             | grapheme_spacingmark_extra)
            - set(ungroup_cat(grapheme_spacingmark_exceptions))))
        grapheme_table = []
        for cat in grapheme_cats:
            grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]])
        grapheme_table.sort(key=lambda w: w[0])
        emit_grapheme_module(rf, grapheme_table, list(grapheme_cats.keys()))