--- /dev/null
+// Package httpapi is a low-level wrapper around the AniDB HTTP API.
+// It only implements the 'anime' and 'categorylist' requests.
+//
+// This wrapper does not implement caching; the API definition requires
+// clients to cache results aggressively, and not doing so can get you banned.
+//
+// http://wiki.anidb.info/w/HTTP_API_Definition
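+//
+// A minimal usage sketch (caching and proper error handling are left to the
+// caller):
+//
+//	a, err := httpapi.GetAnime(1)
+//	if err != nil {
+//		// transport or decoding error
+//	} else if a.Error != "" {
+//		// API-level error reported by AniDB
+//	}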
+package httpapi
+
+import (
+ "encoding/xml"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ AniDBImageBaseURL = "http://img7.anidb.net/pics/anime/" // Base URL for the various Pictures in the response
+
+ DateFormat = "2006-01-02" // Use to convert the various YYYY-MM-DD timestamps to a time.Time.
+)
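+
+// A hedged sketch of how these constants combine with the decoded fields below
+// (ep is an Episode and a is an Anime as returned by GetAnime):
+//
+//	aired, _ := time.Parse(httpapi.DateFormat, ep.AirDate)
+//	pictureURL := httpapi.AniDBImageBaseURL + a.Picture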
+
+const (
+ aniDBHTTPAPIBaseURL = "http://api.anidb.net:9001/httpapi"
+ aniDBProtoVer = 1
+ clientStr = "goanidbhttp"
+ clientVer = 1
+)
+
+// GetAnime requests information about the anime with the given AID.
+func GetAnime(AID int) (a Anime, err error) {
+	res, err := doRequest("anime", reqMap{"aid": AID})
+	if err != nil {
+		return a, err
+	}
+
+	dec := xml.NewDecoder(res.Body)
+	err = dec.Decode(&a)
+	res.Body.Close()
+
+	a.Error = strings.TrimSpace(a.Error)
+
+	return a, err
+}
+
+type reqMap map[string]interface{}
+
+func doRequest(request string, reqMap reqMap) (*http.Response, error) {
+ v := url.Values{}
+ v.Set("protover", fmt.Sprint(aniDBProtoVer))
+ v.Set("client", clientStr)
+ v.Set("clientver", fmt.Sprint(clientVer))
+ v.Set("request", request)
+
+ for k, val := range reqMap {
+ v.Add(k, fmt.Sprint(val))
+ }
+
+ u, _ := url.Parse(aniDBHTTPAPIBaseURL)
+ u.RawQuery = v.Encode()
+ log.Println(u.String())
+ return http.Get(u.String())
+}
+
+// Title with language and type identifier.
+//
+// Title with Lang = ja, Type = official is the official Kanji title.
+//
+// Title with Lang = x-jat, Type = main is the romanized version, also known in other APIs as the Primary Title.
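+//
+// A hedged sketch of extracting the primary title from a decoded Anime:
+//
+//	var primary string
+//	for _, t := range a.Titles {
+//		if t.Lang == "x-jat" && t.Type == "main" {
+//			primary = t.Title
+//		}
+//	}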
+type AnimeTitle struct {
+ Lang string `xml:"lang,attr"` // Language in ISO-ish format
+ Type string `xml:"type,attr"` // "official", "short", etc
+ Title string `xml:",chardata"`
+}
+
+type RelatedAnime struct {
+ ID int `xml:"id,attr"` // AID of the related anime
+ Type string `xml:"type,attr"` // "prequel", "sequel", etc
+ Title string `xml:",chardata"` // Primary title of the related anime
+}
+
+type SimilarAnime struct {
+ ID int `xml:"id,attr"` // AID of the similar anime
+ Approval int `xml:"approval,attr"` // How many users have approved of this connection
+ Total int `xml:"total,attr"` // Total of votes in this connection
+ Title string `xml:",chardata"` // Primary title of the recommended anime
+}
+
+type Recommendation struct {
+ Type string `xml:"type,attr"` // "Recommended", "Must See", etc
+ ID int `xml:"uid,attr"` // User ID of the recommending user
+ Text string `xml:",chardata"` // Text of the user's recommendation
+}
+
+type Creator struct {
+ ID int `xml:"id,attr"` // Creator ID
+ Type string `xml:"type,attr"`
+ Name string `xml:",chardata"` // Always romaji
+}
+
+// Separate from regular Rating because the XML structure is different.
+type AnimeRating struct {
+ Count int `xml:"count,attr"` // Amount of votes/reviews
+ Rating float32 `xml:",chardata"` // Average
+}
+
+type AnimeRatings struct {
+ Permanent AnimeRating `xml:"permanent"` // Votes from people who watched everything
+ Temporary AnimeRating `xml:"temporary"` // Votes from people who are still watching it
+ Review AnimeRating `xml:"review"` // Votes from reviews
+}
+
+type Category struct {
+ ID int `xml:"id,attr"` // Category ID
+ ParentID int `xml:"parentid,attr"` // ID of the parent category
+ R18 bool `xml:"hentai,attr"` // Whether the category represents porn works or not
+ Weight int `xml:"weight,attr"` // Weight of the category for this anime
+
+ Name string `xml:"name"` // Category name
+ Description string `xml:"description"` // Category description
+}
+
+// Completely undocumented.
+// Most entries just have one or two numbers as Identifiers.
+//
+// Type 4 appears to have the official URL in .URL[0]
+//
+// Type 7 appears to have the official name in .Identifiers[0]
+type Resource struct {
+ Type int `xml:"type,attr"`
+ Identifiers []string `xml:"externalentity>identifier"`
+ URL []string `xml:"externalentity>url"`
+}
+
+type Tag struct {
+ ID int `xml:"id,attr"` // Tag ID
+ Approval int `xml:"approval,attr"` // How many users have approved of the tag
+ Spoiler bool `xml:"localspoiler,attr"` // undocumented
+ GlobalSpoiler bool `xml:"globalspoiler,attr"` // undocumented
+ Updated string `xml:"update,attr"` // YYYY-MM-DD
+
+ Name string `xml:"name"` // Tag name
+ Count int `xml:"count"` // undocumented
+}
+
+type Seiyuu struct {
+ ID int `xml:"id,attr"` // Creator ID
+ Name string `xml:",chardata"` // Always romaji
+ Picture string `xml:"picture,attr"` // Picture basename; combine with AniDBImageBaseURL for full URL
+}
+
+type Character struct {
+ ID int `xml:"id,attr"` // Character ID
+ Type string `xml:"type,attr"` // "main character in", "secondary cast in", "appears in"
+ Updated string `xml:"update,attr"` // YYYY-MM-DD
+
+ Rating Rating `xml:"rating"`
+ Name string `xml:"name"` // Always romaji
+ Gender string `xml:"gender"` // "male", "female", "unknown", sometimes blank
+ Description string `xml:"description"`
+ CharacterType string `xml:"charactertype"` // "Character", "Organization", "Vessel", etc
+ Episodes string `xml:"episodes"` // List of episodes where character appears
+ Picture string `xml:"picture"` // Picture basename; combine with AniDBImageBaseURL for full URL
+
+ Seiyuu *Seiyuu `xml:"seiyuu"` // The voice actor, if present
+}
+
+type Characters []Character // Implements sort.Interface; groups by Type and sorts by Name
+
+type EpisodeTitle struct {
+ Lang string `xml:"lang,attr"`
+ Title string `xml:",chardata"`
+}
+
+type Rating struct {
+ Votes int `xml:"votes,attr"`
+ Rating float32 `xml:",chardata"`
+}
+
+type EpNo struct {
+ Type int `xml:"type,attr"` // 1 for regular episodes, 2 for specials, etc
+ EpNo string `xml:",chardata"` // Not necessarily a plain integer; may be prefixed by a single letter indicating the Type
+}
+
+type Episode struct {
+ ID int `xml:"id,attr"` // Episode ID
+ Updated string `xml:"update,attr"` // YYYY-MM-DD
+
+ EpNo EpNo `xml:"epno"`
+ Length int `xml:"length"` // Length in minutes (rounding method undocumented)
+ AirDate string `xml:"airdate"` // YYYY-MM-DD
+ Rating Rating `xml:"rating"`
+ Titles []EpisodeTitle `xml:"title"`
+}
+
+type Episodes []Episode // Implements sort.Interface; groups by EpNo.Type, orders by the integer portion of EpNo.EpNo
+
+type Anime struct {
+ Error string `xml:",chardata"` // API request encountered an error if this is not ""
+
+ ID int `xml:"id,attr"` // AID of the anime
+ R18 bool `xml:"restricted,attr"` // Whether the anime is considered porn
+
+ Type string `xml:"type"` // "TV Series", "Movie", "OVA", etc
+ EpisodeCount int `xml:"episodecount"` // Unreliable, has a set value even when the total number is unknown
+ StartDate string `xml:"startdate"` // YYYY-MM-DD
+ EndDate string `xml:"enddate"` // YYYY-MM-DD
+
+ Titles []AnimeTitle `xml:"titles>title"`
+ RelatedAnime []RelatedAnime `xml:"relatedanime>anime"`
+ SimilarAnime []SimilarAnime `xml:"similaranime>anime"`
+
+ Recommendations []Recommendation `xml:"recommendations>recommendation"`
+
+ URL string `xml:"url"` // Official URL
+
+ Creators []Creator `xml:"creators>name"`
+
+ Description string `xml:"description"`
+
+ Ratings AnimeRatings `xml:"ratings"`
+
+ Picture string `xml:"picture"` // Picture basename; combine with AniDBImageBaseURL for full URL
+
+ Categories []Category `xml:"categories>category"` // Unsorted
+ Resources []Resource `xml:"resources>resource"` // undocumented
+ Tags []Tag `xml:"tags>tag"` // Unsorted
+ Characters Characters `xml:"characters>character"` // Unsorted
+ Episodes Episodes `xml:"episodes>episode"` // Unsorted
+}
--- /dev/null
+package httpapi
+
+import (
+ "encoding/xml"
+ "strings"
+)
+
+// Separate from regular Category because of different XML structure.
+type CLCategory struct {
+ ID int `xml:"id,attr"` // Category ID
+ ParentID int `xml:"parentid,attr"` // ID of the parent category
+ R18 bool `xml:"ishentai,attr"` // Whether the category is associated with porn or not
+
+ Name string `xml:"name"` // Category name
+ Description string `xml:"description"` // Category description
+}
+
+type CategoryList struct {
+ Error string `xml:",chardata"`
+ Categories []CLCategory `xml:"category"`
+}
+
+func GetCategoryList() (cl CategoryList, err error) {
+	res, err := doRequest("categorylist", reqMap{})
+	if err != nil {
+		return cl, err
+	}
+
+	dec := xml.NewDecoder(res.Body)
+	err = dec.Decode(&cl)
+	res.Body.Close()
+
+	cl.Error = strings.TrimSpace(cl.Error)
+
+	return cl, err
+}
--- /dev/null
+package httpapi
+
+import (
+ "sort"
+ "strconv"
+)
+
+func (es Episodes) Len() int {
+ return len(es)
+}
+
+func (es Episodes) Less(i, j int) bool {
+ if es[i].EpNo.Type == es[j].EpNo.Type {
+ if es[i].EpNo.Type == 1 {
+ a, _ := strconv.ParseInt(es[i].EpNo.EpNo, 10, 32)
+ b, _ := strconv.ParseInt(es[j].EpNo.EpNo, 10, 32)
+ return a < b
+ } else {
+ a, _ := strconv.ParseInt(es[i].EpNo.EpNo[1:], 10, 32)
+ b, _ := strconv.ParseInt(es[j].EpNo.EpNo[1:], 10, 32)
+ return a < b
+ }
+ }
+ return es[i].EpNo.Type < es[j].EpNo.Type
+}
+
+func (es Episodes) Swap(i, j int) {
+ es[i], es[j] = es[j], es[i]
+}
+
+func (cs Characters) Len() int {
+ return len(cs)
+}
+
+func (cs Characters) Less(i, j int) bool {
+ if cs[i].Type == cs[j].Type {
+ return sort.StringSlice{cs[i].Name, cs[j].Name}.Less(0, 1)
+ }
+
+ a := 0
+ switch cs[i].Type {
+ case "main character in":
+ a = 0
+ case "secondary cast in":
+ a = 1
+ case "appears in":
+ a = 2
+ default:
+ a = 3
+ }
+
+ b := 0
+ switch cs[j].Type {
+ case "main character in":
+ b = 0
+ case "secondary cast in":
+ b = 1
+ case "appears in":
+ b = 2
+ default:
+ b = 3
+ }
+
+ return a < b
+}
+
+func (cs Characters) Swap(i, j int) {
+ cs[i], cs[j] = cs[j], cs[i]
+}
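+
+// A hedged usage sketch: the Episodes and Characters slices decoded into an
+// Anime can be ordered in place with the standard library sorter:
+//
+//	sort.Sort(a.Episodes)
+//	sort.Sort(a.Characters)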
--- /dev/null
+package titles
+
+import (
+ "sort"
+)
+
+type ResultSet map[int]Anime
+type Results []Anime
+
+func (res Results) AIDList() (aid []int) {
+ aid = make([]int, 0, len(res))
+ for _, r := range res {
+ aid = append(aid, r.AID)
+ }
+ return
+}
+
+func (matches SearchMatches) ToResultSet(db *TitlesDatabase) (rs ResultSet) {
+ if matches == nil {
+ return nil
+ }
+ db.RLock()
+ defer db.RUnlock()
+
+ rs = ResultSet{}
+ for _, m := range matches {
+ rs[m.AID] = *db.AnimeMap[m.AID]
+ }
+ return
+}
+
+func (rs ResultSet) unsortedResults() (res Results) {
+ res = make(Results, 0, len(rs))
+ for _, r := range rs {
+ res = append(res, r)
+ }
+ return
+}
+
+// A ResultComparer returns true if the first parameter should sort before the second.
+type ResultComparer func(*Anime, *Anime) bool
+
+var (
+ aidSort = func(a *Anime, b *Anime) bool {
+ return a.AID < b.AID
+ }
+ titleSort = func(a *Anime, b *Anime) bool {
+ return sort.StringSlice{a.PrimaryTitle, b.PrimaryTitle}.Less(0, 1)
+ }
+)
+
+func (rs ResultSet) ResultsByAID() (res Results) {
+ return rs.ResultsByFunc(aidSort)
+}
+
+func (rs ResultSet) ReverseResultsByAID() (res Results) {
+ return rs.ReverseResultsByFunc(aidSort)
+}
+
+func (rs ResultSet) ResultsByPrimaryTitle() (res Results) {
+ return rs.ResultsByFunc(titleSort)
+}
+
+func (rs ResultSet) ReverseResultsByPrimaryTitle() (res Results) {
+ return rs.ReverseResultsByFunc(titleSort)
+}
+
+func (rs ResultSet) ResultsByFunc(f ResultComparer) (res Results) {
+ res = rs.unsortedResults()
+ f.Sort(res)
+ return
+}
+
+func (rs ResultSet) ReverseResultsByFunc(f ResultComparer) (res Results) {
+	res = rs.unsortedResults()
+	// invert the comparer so the results come out in descending order
+	reversed := ResultComparer(func(a, b *Anime) bool { return f(b, a) })
+	reversed.Sort(res)
+	return
+}
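+
+// A hedged usage sketch: a custom ResultComparer can be passed directly to
+// ResultsByFunc, e.g. to sort by descending AID:
+//
+//	res := rs.ResultsByFunc(func(a, b *Anime) bool {
+//		return a.AID > b.AID
+//	})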
--- /dev/null
+package titles
+
+// import "sync"
+
+type ResultFilter func(*Anime) bool
+
+type TitleComparer func(string) bool
+
+func (rs ResultSet) FilterByTitles(cmp TitleComparer) ResultSet {
+ return rs.Filter(
+ func(a *Anime) bool {
+ if cmp(a.PrimaryTitle) {
+ return true
+ }
+
+ for _, m := range []map[string][]Name{
+ a.OfficialNames, a.ShortNames, a.Synonyms,
+ } {
+ for _, names := range m {
+ for _, name := range names {
+ if cmp(name.Title) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+ })
+}
+
+func (rs ResultSet) Filter(filter ResultFilter) ResultSet {
+ ret := ResultSet{}
+ for _, a := range rs {
+ if filter(&a) {
+ ret[a.AID] = a
+ }
+ }
+
+ return ret
+}
--- /dev/null
+package titles
+
+import (
+ "regexp"
+ "strings"
+)
+
+func (db *TitlesDatabase) ExactSearchN(s string, n int) (matches SearchMatches) {
+ return db.doSearchN(func(k string) bool { return s == k }, n)
+}
+
+func (db *TitlesDatabase) ExactSearchAll(s string) (matches SearchMatches) {
+ return db.ExactSearchN(s, -1)
+}
+
+func (db *TitlesDatabase) ExactSearch(s string) (m SearchMatch) {
+ return firstMatch(db.ExactSearchN(s, 1))
+}
+
+func (db *TitlesDatabase) ExactSearchFoldN(s string, n int) (matches SearchMatches) {
+ return db.doSearchN(func(k string) bool { return strings.EqualFold(k, s) }, n)
+}
+
+func (db *TitlesDatabase) ExactSearchFoldAll(s string) (matches SearchMatches) {
+ return db.ExactSearchFoldN(s, -1)
+}
+
+func (db *TitlesDatabase) ExactSearchFold(s string) (m SearchMatch) {
+ return firstMatch(db.ExactSearchFoldN(s, 1))
+}
+
+func (db *TitlesDatabase) RegexpSearchN(re *regexp.Regexp, n int) (matches SearchMatches) {
+ return db.doSearchN(func(k string) bool { return re.MatchString(k) }, n)
+}
+
+func (db *TitlesDatabase) RegexpSearchAll(re *regexp.Regexp) (matches SearchMatches) {
+ return db.RegexpSearchN(re, -1)
+}
+
+func (db *TitlesDatabase) RegexpSearch(re *regexp.Regexp) (m SearchMatch) {
+ return firstMatch(db.RegexpSearchN(re, 1))
+}
+
+func (db *TitlesDatabase) PrefixSearchN(s string, n int) (matches SearchMatches) {
+ return db.doSearchN(func(k string) bool { return strings.HasPrefix(k, s) }, n)
+}
+
+func (db *TitlesDatabase) PrefixSearchAll(s string) (matches SearchMatches) {
+ return db.PrefixSearchN(s, -1)
+}
+
+func (db *TitlesDatabase) PrefixSearch(s string) (m SearchMatch) {
+ return firstMatch(db.PrefixSearchN(s, 1))
+}
+
+func (db *TitlesDatabase) SuffixSearchN(s string, n int) (matches SearchMatches) {
+ return db.doSearchN(func(k string) bool { return strings.HasSuffix(k, s) }, n)
+}
+
+func (db *TitlesDatabase) SuffixSearchAll(s string) (matches SearchMatches) {
+ return db.SuffixSearchN(s, -1)
+}
+
+func (db *TitlesDatabase) SuffixSearch(s string) (m SearchMatch) {
+ return firstMatch(db.SuffixSearchN(s, 1))
+}
+
+func (db *TitlesDatabase) PrefixSearchFoldN(s string, n int) (matches SearchMatches) {
+ s = strings.ToLower(s)
+ return db.doSearchN(func(k string) bool { return strings.HasPrefix(strings.ToLower(k), s) }, n)
+}
+
+func (db *TitlesDatabase) PrefixSearchFoldAll(s string) (matches SearchMatches) {
+ return db.PrefixSearchFoldN(s, -1)
+}
+
+func (db *TitlesDatabase) PrefixSearchFold(s string) (m SearchMatch) {
+ return firstMatch(db.PrefixSearchFoldN(s, 1))
+}
+
+func (db *TitlesDatabase) SuffixSearchFoldN(s string, n int) (matches SearchMatches) {
+ s = strings.ToLower(s)
+ return db.doSearchN(func(k string) bool { return strings.HasSuffix(strings.ToLower(k), s) }, n)
+}
+
+func (db *TitlesDatabase) SuffixSearchFoldAll(s string) (matches SearchMatches) {
+ return db.SuffixSearchFoldN(s, -1)
+}
+
+func (db *TitlesDatabase) SuffixSearchFold(s string) (m SearchMatch) {
+ return firstMatch(db.SuffixSearchFoldN(s, 1))
+}
+
+// \b doesn't consider the boundary between e.g. '.' and ' ' in ". "
+// to be a word boundary, but . may be significant in a title
+const wordBound = ` `
+
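+// FuzzySearch searches for the given string by trying a series of progressively
+// less strict matches: exact title, then word-boundary prefix/suffix/infix
+// matches, then plain substrings, and finally matches that allow other words or
+// characters between the input words, falling back to every title that simply
+// contains the input's words in order. The results of the strictest step that
+// matched anything are returned. A hedged usage sketch:
+//
+//	rs := db.FuzzySearch("Ghost in the Shell")
+//	for _, a := range rs.ResultsByPrimaryTitle() {
+//		fmt.Println(a.AID, a.PrimaryTitle)
+//	}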
+func (db *TitlesDatabase) FuzzySearch(s string) (rs ResultSet) {
+ // whole title
+ if matches := db.ExactSearchAll(s); len(matches) > 0 {
+ // log.Printf("best case: %q", s)
+ return matches.ToResultSet(db)
+ }
+
+ // all regexes are guaranteed to compile:
+ // the user-supplied token already went through regexp.QuoteMeta
+ // all other tokens are hardcoded, so a compilation failure is reason for panic
+
+ words := strings.Fields(regexp.QuoteMeta(s))
+ q := strings.Join(words, `.*`)
+
+ candidates := db.RegexpSearchAll(regexp.MustCompile(q)).ToResultSet(db)
+ if len(candidates) == 0 {
+ // log.Printf("no results: %q", s)
+ return nil
+ }
+ q = strings.Join(words, ` `)
+
+ // initial words (prefix, but ending at word boundary)
+ re := regexp.MustCompile(`\A` + q + wordBound)
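+	// reCmp captures the variable re itself, so each reassignment of re below
+	// changes what it matches against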
+ reCmp := func(k string) bool { return re.MatchString(k) }
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("1st case: %q", s)
+ return
+ }
+
+ // final words (suffix, but starting at a word boundary)
+ re = regexp.MustCompile(wordBound + q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("2nd case: %q", s)
+ return
+ }
+
+ // infix words
+ re = regexp.MustCompile(wordBound + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("3rd case: %q", s)
+ return
+ }
+
+ // initial substring
+ if rs = candidates.FilterByTitles(
+ func(k string) bool {
+ return strings.HasPrefix(k, s)
+ }); len(rs) > 0 {
+ // log.Printf("4th case: %q", s)
+ return
+ }
+
+ // terminal substring
+ if rs = candidates.FilterByTitles(
+ func(k string) bool {
+ return strings.HasSuffix(k, s)
+ }); len(rs) > 0 {
+ // log.Printf("5th case: %q", s)
+ return
+ }
+
+ // words in that order, but with possible words between them...
+ q = strings.Join(words, ` +(?:[^ ]+ +)*`)
+
+ // ... initial ...
+ re = regexp.MustCompile(`\A` + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("6th case: %q", s)
+ return
+ }
+
+ // ... then final ...
+ re = regexp.MustCompile(wordBound + q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("7th case: %q", s)
+ return
+ }
+
+ // ... then anywhere
+ re = regexp.MustCompile(wordBound + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("8th case: %q", s)
+ return
+ }
+
+ // then it's that, but with any or no characters between the input words...
+ q = strings.Join(words, `.*`)
+
+ // and the same priority order as for the substring case
+ // initial
+ re = regexp.MustCompile(`\A` + q)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("9th case: %q", s)
+ return
+ }
+
+ // final
+ re = regexp.MustCompile(q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("10th case: %q", s)
+ return
+ }
+
+	// no result better than the initial candidates
+ // log.Printf("worst case: %q", s)
+ return candidates
+}
+
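+// FuzzySearchFold is the case-insensitive variant of FuzzySearch, running the
+// same cascade of progressively less strict matches with case folding.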
+func (db *TitlesDatabase) FuzzySearchFold(s string) (rs ResultSet) {
+ // whole title
+ if matches := db.ExactSearchFoldAll(s); len(matches) > 0 {
+ return matches.ToResultSet(db)
+ }
+
+ words := strings.Fields(`(?i:` + regexp.QuoteMeta(s) + `)`)
+ q := strings.Join(words, `.*`)
+
+ candidates := db.RegexpSearchAll(regexp.MustCompile(q)).ToResultSet(db)
+ if len(candidates) == 0 {
+ // log.Printf("no results: %q", s)
+ return nil
+ }
+ q = strings.Join(words, `\s+`)
+
+ // initial words (prefix, but ending at word boundary)
+ re := regexp.MustCompile(`\A` + q + wordBound)
+ reCmp := func(k string) bool { return re.MatchString(k) }
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("1st case: %q", s)
+ return
+ }
+
+ // final words (suffix, but starting at a word boundary)
+ re = regexp.MustCompile(wordBound + q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("2nd case: %q", s)
+ return
+ }
+
+ // infix words
+ re = regexp.MustCompile(wordBound + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("3rd case: %q", s)
+ return
+ }
+
+ // initial substring
+ ls := strings.ToLower(s)
+ if rs = candidates.FilterByTitles(
+ func(k string) bool {
+ return strings.HasPrefix(strings.ToLower(k), ls)
+ }); len(rs) > 0 {
+ // log.Printf("4th case: %q", s)
+ return
+ }
+
+ // terminal substring
+ if rs = candidates.FilterByTitles(
+ func(k string) bool {
+ return strings.HasSuffix(strings.ToLower(k), ls)
+ }); len(rs) > 0 {
+ // log.Printf("5th case: %q", s)
+ return
+ }
+
+ // words in that order, but with possible words between them...
+ q = strings.Join(words, `\s+(?:\S+\s+)*`)
+
+ // ... initial ...
+ re = regexp.MustCompile(`\A` + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("6th case: %q", s)
+ return
+ }
+
+ // ... then final ...
+ re = regexp.MustCompile(wordBound + q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("7th case: %q", s)
+ return
+ }
+
+ // ... then anywhere
+ re = regexp.MustCompile(wordBound + q + wordBound)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("8th case: %q", s)
+ return
+ }
+
+ // then it's that, but with any or no characters between the input words...
+ q = strings.Join(words, `.*`)
+
+ // and the same priority order as for the substring case
+ // initial
+ re = regexp.MustCompile(`\A` + q)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("9th case: %q", s)
+ return
+ }
+
+ // final
+ re = regexp.MustCompile(q + `\z`)
+ if rs = candidates.FilterByTitles(reCmp); len(rs) > 0 {
+ // log.Printf("10th case: %q", s)
+ return
+ }
+
+	// no result better than the initial candidates
+ // log.Printf("worst case: %q", s)
+ return candidates
+}
--- /dev/null
+package titles_test
+
+import (
+ "fmt"
+ "github.com/Kovensky/go-anidb/titles"
+ "os"
+ "testing"
+)
+
+var db = &titles.TitlesDatabase{}
+
+func init() {
+	fh, err := os.Open("anime-titles.dat.gz")
+	if err != nil {
+		if fh, err = os.Open("anime-titles.dat"); err != nil {
+			panic(err)
+		}
+	}
+
+	db.LoadDB(fh)
+}
+
+type TestVector struct {
+ Input string
+ Limit int
+ AIDs []int
+}
+
+func TestFuzzySearch(T *testing.T) {
+ // Each vector goes one step deeper in the fuzzy search stack
+ vec := []TestVector{
+ // no match
+ TestVector{Input: "\x00", Limit: -1, AIDs: []int{}},
+ // exact
+ TestVector{Input: "SAC2", Limit: 1, AIDs: []int{1176}},
+		// exact, but in Hungarian!
+ TestVector{Input: "Varázslatos álmok", Limit: -1, AIDs: []int{235}},
+ // prefix words
+ TestVector{Input: "Varázslatos", Limit: 3, AIDs: []int{235, 2152, 2538}},
+ // suffix words
+ TestVector{Input: "A rózsa ígérete", Limit: -1, AIDs: []int{2152}},
+ // infix words
+ TestVector{Input: "Stand Alone", Limit: 1, AIDs: []int{247}},
+ // prefix
+ TestVector{Input: "Ghost in t", Limit: 1, AIDs: []int{61}},
+ // suffix
+ TestVector{Input: "flowne", Limit: 1, AIDs: []int{184}},
+ // words, first word first in name
+ TestVector{Input: "Kumo Mukou", Limit: -1, AIDs: []int{469}},
+ // words, last word last in name
+ TestVector{Input: "A titka", Limit: 1, AIDs: []int{303}},
+ // words, infix but not contiguous
+ TestVector{Input: "Kidoutai 2nd", Limit: 1, AIDs: []int{1176}},
+ // strings, first string first in name
+ TestVector{Input: "Kouka Kidou", Limit: 1, AIDs: []int{61}},
+ // strings, last string last in name
+ TestVector{Input: "app Princess", Limit: 1, AIDs: []int{640}},
+ // strings, anywhere in this order
+ TestVector{Input: "ouka douta", Limit: 2, AIDs: []int{61, 247}},
+ // match everything
+ TestVector{Input: "", Limit: 1, AIDs: []int{1}},
+ }
+
+ for i, v := range vec {
+ res := db.FuzzySearch(v.Input).ResultsByAID()
+ if v.Limit > 0 && len(res) > v.Limit {
+ res = res[:v.Limit]
+ }
+
+ wrong := false
+ if len(v.AIDs) != len(res) {
+ wrong = true
+ } else {
+ for j, r := range res {
+ if v.AIDs[j] != r.AID {
+ wrong = true
+ }
+ }
+ }
+
+ if wrong {
+ list := make([]string, 0, len(res))
+ for _, r := range res {
+ list = append(list, fmt.Sprintf("%d (%s)", r.AID, r.PrimaryTitle))
+ }
+ T.Errorf("Vector #%d: Expected AID list %v, got AID list %v", i+1, v.AIDs, list)
+ }
+ }
+}
+
+func TestFuzzySearchFold(T *testing.T) {
+ // Same vector as the previous one, but with disturbed word cases
+ vec := []TestVector{
+ // exact
+ TestVector{Input: "sac2", Limit: 1, AIDs: []int{1176}},
+		// exact, but in Hungarian!
+ TestVector{Input: "VarÁzslatos Álmok", Limit: -1, AIDs: []int{235}},
+ // prefix words
+ TestVector{Input: "varázslatos", Limit: 3, AIDs: []int{235, 2152, 2538}},
+ // suffix words
+ TestVector{Input: "a rÓzsa ígérete", Limit: -1, AIDs: []int{2152}},
+ // infix words
+ TestVector{Input: "Stand Alone", Limit: 1, AIDs: []int{247}},
+ // prefix
+ TestVector{Input: "ghost in t", Limit: 1, AIDs: []int{61}},
+ // suffix
+ TestVector{Input: "FlownE", Limit: 1, AIDs: []int{184}},
+ // words, first word first in name
+ TestVector{Input: "kumo mukou", Limit: -1, AIDs: []int{469}},
+ // words, last word last in name
+ TestVector{Input: "a titka", Limit: -1, AIDs: []int{303}},
+ // words, infix but not contiguous
+ TestVector{Input: "kidoutai 2nd", Limit: 1, AIDs: []int{1176}},
+ // strings, first string first in name
+ TestVector{Input: "Kouka kidou", Limit: 1, AIDs: []int{61}},
+ // strings, last string last in name
+ TestVector{Input: "app princess", Limit: 1, AIDs: []int{640}},
+ // strings, anywhere in this order
+ TestVector{Input: "Ouka Douta", Limit: 2, AIDs: []int{61, 247}},
+ // no match
+ TestVector{Input: "\x00", Limit: -1, AIDs: []int{}},
+ }
+
+ for i, v := range vec {
+ res := db.FuzzySearchFold(v.Input).ResultsByAID()
+ if v.Limit > 0 && len(res) > v.Limit {
+ res = res[:v.Limit]
+ }
+
+ wrong := false
+ if len(v.AIDs) != len(res) {
+ wrong = true
+ } else {
+ for j, r := range res {
+ if v.AIDs[j] != r.AID {
+ wrong = true
+ }
+ }
+ }
+
+ if wrong {
+ list := make([]string, 0, len(res))
+ for _, r := range res {
+ list = append(list, fmt.Sprintf("%d (%s)", r.AID, r.PrimaryTitle))
+ }
+ T.Errorf("Vector #%d: Expected AID list %v, got AID list %v", i+1, v.AIDs, list)
+ }
+ }
+}
+
+// exact match of primary title
+func BenchmarkFuzzySearch_bestCase(B *testing.B) {
+ // grep '|1|' anime-titles.dat | cut -d'|' -f4 | sort -R | sed 's/\(.*\)/"\1",/' | \
+ // head -n 30
+ vec := []string{
+ "Shin Tennis no Ouji-sama", "Shimai Ningyou", "Aniyome",
+ "Dragon Ball Z: Kyokugen Battle!! Sandai Super Saiyajin", "Uchuu Kuubo Blue Noah",
+ "Hotaru no Haka", "First Kiss Story: Kiss Kara Hajimaru Monogatari", "Seikai no Senki III",
+ "Ikkitousen: Xtreme Xecutor", "Houkago Ren`ai Club: Koi no Etude",
+ "DNA2: Dokoka de Nakushita Aitsu no Aitsu (1995)", "Bamboo Blade", "Accelerando",
+ "Soukyuu no Fafner: Dead Aggressor", "Eiga Futari wa Precure Max Heart",
+ "Kyoufu no Kyou-chan", "Shin Taketori Monogatari: 1000-nen Joou", "Fresh Precure!",
+ "Grope: Yami no Naka no Kotori-tachi", "Seitokai Yakuindomo", "Chikyuu Shoujo Arjuna",
+ "Choukou Tenshi Escalayer", "Dragon Ball Kai", "Dragon League", "Hatsukoi Limited",
+ "Sexfriend", "Ao no Exorcist", "Futatsu no Spica", "Adesugata Mahou no Sannin Musume",
+ "Yawara! A Fashionable Judo Girl",
+ }
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
+
+// // exact match of x-jat, en or ja non-primary title
+// func BenchmarkFuzzySearch_secondBestCase(B *testing.B) {
+// // grep -E '\|3\|(x-jat|en|ja)\|' anime-titles.dat | cut -d'|' -f4 | sort -R | \
+// // sed 's/\(.*\)/"\1",/' | head -n 30
+// vec := []string{
+// "yosusora", "heartcatch", "chuunibyou", "Stringendo", "おれいも", "yamato 2199",
+// "mai otome zwei", "cg r1", "harem", "Dorvack", "Natsume 1", "SMJA", "SM", "J2",
+// "amstv2", "BJ Movie (2005)", "munto2", "nyc", "MT", "DBZ Movie 2",
+// "Zatch Bell Movie 2", "Armitage", "J0ker", "CH", "sugar", "vga", "Nadesico",
+// "dgc nyo", "setv", "D.g", "マジプリ", "myyour", "Haruhi 2009", "bantorra", "yamato2",
+// "bakuhan", "vk2", "BBB", "5-2", "GSD SE III", "akasaka", "GS SE II", "F3", "おれつば",
+// "sencolle", "wellber", "SailorMoon", "ay", "HCPC", "kxstv", "Shana III",
+// }
+
+// B.ResetTimer()
+// for i := 0; i < B.N; i++ {
+// db.FuzzySearch(vec[i%len(vec)])
+// }
+// }
+
+// // exact match of non-primary title in any other language
+// func BenchmarkFuzzySearch_thirdBestCase(B *testing.B) {
+// // grep '|2|' anime-titles.dat | grep -Ev '(x-jat|en|ja)' | cut -d'|' -f4 | \
+// // sort -R | sed 's/\(.*\)/"\1",/' | head -n 30
+// vec := []string{
+// "Зірка☆Щастя", "La ilusión de triunfar", "La scomparsa di Haruhi Suzumiya",
+// "Код Геас: Бунтът на Люлюш 2", "我的女神 剧场版", "Lamu - Un rêve sans fin",
+// "Lupin III: La cospirazione dei Fuma", "Адовая Девочка дубль 2", "夏娃的时间",
+// "Дівчинка, що стрибала крізь всесвіт", "Мій сусід Тоторо", "机巧魔神",
+// "City Hunter - Flash spécial !? La mort de Ryo Saeba", "Ateştopu", "مسدس×سيف",
+// "Gli amici animali", "沉默的未知", "忧伤大人二之宫", "Пита-Тен", "Глава-гора", "高校龍中龍",
+// "Яблочное зернышко (фильм второй)", "پروکسی مابعد", "青之花", "Heidi, la fille des Alpes",
+// "银盘万花筒", "Temi d`amore tra i banchi di scuola", "Съюзът на Среброкрилите", "Аякаши",
+// "Дух в оболонці: комплекс окремості", "贫乏姊妹物语", "La rose de Versailles",
+// "แฮปปี้ เลสซั่น", "Juodasis Dievas", "Ерата Сенгоку: Последното парти",
+// "Белина: Чезнеща в тъмнината", "Пламенный лабиринт", "Капризный Робот", "Kovboy Bebop: Film",
+// "Bavel`in Kitabı", "东京魔人学院剑风帖 龙龙", "سكول رمبل الفصل الثاني", "青之驱魔师", "سايكانو",
+// "神的记事本", "死神的歌谣", "Angel e a Flor de Sete Cores", "ماگی: هزارتوی جادو", "Spirală",
+// "Chié la petite peste",
+// }
+
+// B.ResetTimer()
+// for i := 0; i < B.N; i++ {
+// db.FuzzySearch(vec[i%len(vec)])
+// }
+// }
+
+// match of initial words
+func BenchmarkFuzzySearch_initialWords(B *testing.B) {
+ // cat anime-titles.dat | cut -d'|' -f4 | grep -E '[^ ]+ [^ ]+ [^ ]+' | \
+ // sort -R | cut -d' ' -f1,2 | sed 's/\(.*\)/"\1",/' | head -n 30
+ vec := []string{
+ "To Love", "Utawarerumono -", "Eden of", "D.C.if ~ダ・カーポ", "Вечност над",
+ "Rupan Sansei:", "Los Caballeros", "Neko Hiki", "LoGH: A", "Arcadia of",
+ "Pokémon 4Ever:", "Lenda Lunar", "Transformers: Master", "Tάρο, ο", "El Puño",
+ "El taxi", "Lupin the", "Ah! My", "Le journal", "Odin: Koushi", "Amazing-man: The",
+ "Legend of", "Youka no", "Я люблю", "Abe George", "Sisters of", "Ouran High",
+ "Batman: Gotham", "Dantalian no", "Koi to", "Night Shift",
+ }
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
+
+// match of final words
+func BenchmarkFuzzySearch_finalWords(B *testing.B) {
+ // cat anime-titles.dat | cut -d'|' -f4 | grep -E '^[^ ]+ [^ ]+ [^ ]+ [^ ]+$' | \
+ // sort -R | cut -d' ' -f3,4 | sed 's/\(.*\)/"\1",/' | head -n 30
+ vec := []string{
+ "do Zodíaco", "Formula 91", "Shuto Houkai", "Deadly Sins", "gui lai",
+ "muistoja tulevaisuudesta", "Mission 1-3", "スペシャルエディションII それぞれの剣", "Một Giây",
+ "Meia-Lua Acima", "Mighty: Decode", "To Screw", "do Tênis", "(Duke Fleed)", "Olympic Taikai",
+ "Драма ангелов", "Shihosha Judge", "демонов Йоко", "Shoujo Club", "Family (2)", "do Tesouro",
+ "Witte Leeuw", "von Mandraguar", "Jin Xia", "Tabi Movie", "Symphonia 2", "no Tenkousei",
+ "Movie (2011)", "Guardian Signs", "Você 2",
+ }
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
+
+// XXX: This is somehow the most time-consuming case, despite terminating several
+// regular expressions earlier than the next two benchmarks.
+//
+// All regular expressions checked here (besides the .*-peppered one used for the
+// initial candidate search) have no metacharacters at all beyond the trivial \A and \z,
+// while the ones for the following cases include more complicated grouped expressions...
+func BenchmarkFuzzySearch_infixWords(B *testing.B) {
+ // cat anime-titles.dat | cut -d'|' -f4 | grep -E '^[^ ]+ [^ ]+ [^ ]+ [^ ]+$' | \
+ // sort -R | cut -d' ' -f2,3 | sed 's/\(.*\)/"\1",/' | head -n 30
+ vec := []string{
+ "Yes! プリキュア5GoGo!", "Grime X-Rated", "Diễn Ngàn", "Super-Refined Ninja",
+ "o Haita", "Conan: 14.", "the Seagulls", "009 Kaijuu", "Monogatari Daini-hen:",
+ "no Haha", "по Ловец", "Centimeters per", "wang gui", "the Wandering", "Saru Kani",
+ "Dark Red", "Pair: Project", "Охотник на", "trois petits", "of Teacher", "wa Suitai",
+ "Lolita Fantasy", "εκατοστά το", "Eri-sama Katsudou", "希望の学園と絶望の高校生 The",
+ "Comet SPT", "HUNTER スペシャル", "no Makemono", "Kızı: İkinci", "Pirate Captain",
+ }
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
+
+func BenchmarkFuzzySearch_alternatingWords(B *testing.B) {
+ // cat anime-titles.dat | cut -d'|' -f4 | grep -E '^[^ ]+ [^ ]+ [^ ]+ [^ ]+ [^ ]+$' | \
+ // sort -R | cut -d' ' -f2,4 | sed 's/\(.*\)/"\1",/' | head -n 30
+ vec := []string{
+ "of Millennium", "Kreuz: und", "для Літнє", "Saikyou Deshi", "Hearts: no", "Roh Wolf",
+ "III: Columbus", "Shin-chan Film", "Ball Superandroid", "恋のステージ=HEART FIRE!",
+ "Disease Moon", "Corps Mecha", "BLOOD-C Last", "- trésor", "Lover a", "dievčati, preskočilo",
+ "Star: Szomorú", "Ai Marchen", "Kishin &", "Seiya: Goddess", "Orange Shiroi", "Punch Sekai:",
+ "No.1: no", "ο του", "プリキュアオールスターズ Stage", "Ankoku Hakai", "8-ма по", "II Ultimate",
+ "Tenma Kuro", "Grade Kakusei",
+ }
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
+
+func BenchmarkFuzzySearch_worstCase(B *testing.B) {
+ // cat anime-titles.dat | cut -d'|' -f4 | \
+ // perl -MEncode \
+ // -pe'chomp; $_ = encode_utf8(substr(decode_utf8($_), 1, -1) . "\n")' | \
+ // sort -R | sed 's/\(.*\)/"\1",/' | head -n 30
+ // further perturbed by hand
+ vec := []string{
+ "ig ray S in han: Den tsu o Yob Amig",
+ "ar Ben th Sea: 20.00 Mil for Lov",
+ "eminin Famil",
+ "界の断",
+ "凹内かっぱまつ",
+ "ゅーぶら!",
+ "unog",
+ "aji no ppo: pion Roa",
+ "etect boy ma",
+ "aruto Movi",
+ "光のピア ユメミと銀 バラ騎士",
+ "ki ru Sh j",
+ "aint : Ο Χαμέ μβάς - Μυθολογία Άδ",
+ "as Camarer s Mágica",
+ "oll Be Foreve",
+ "RAG BALL SODE of BAR",
+ "ero eroppi no ken: Pink no",
+ "acre east chin Cyg",
+ "ister Princes",
+ "PRINTS IN SAND",
+ "е й хазяї",
+ "quent in Dra",
+ "inoc chio Bouke",
+ "rm Libra : Banto",
+ "2 sk sbrutna pojkar äventyrens",
+ "タス",
+ "last kinė Mažyl",
+ "女チャングム 夢 第二",
+ "錬金術師 嘆きの丘 の聖なる",
+ "hou Rouge Lip"}
+
+ B.ResetTimer()
+ for i := 0; i < B.N; i++ {
+ db.FuzzySearch(vec[i%len(vec)])
+ }
+}
--- /dev/null
+package titles
+
+import (
+ "sync"
+)
+
+type SearchMatch struct {
+ Matched string
+ AID int
+}
+
+type SearchMatches []SearchMatch
+
+func searchFunc(wg *sync.WaitGroup, ret chan SearchMatch, t *TitleMap, cmp func(string) bool) {
+ defer wg.Done()
+
+ for _, m := range []map[string]int{t.ShortMap, t.OfficialMap, t.SynonymMap} {
+ for k, v := range m {
+ if cmp(k) {
+ ret <- SearchMatch{Matched: k, AID: v}
+ }
+ }
+ }
+}
+
+func (db *TitlesDatabase) multiSearch(cmp func(string) bool) (matches chan SearchMatch) {
+ db.RLock()
+
+ matches = make(chan SearchMatch, 100)
+
+ go func() {
+ defer db.RUnlock()
+
+ match := make(chan SearchMatch, 100)
+
+ for k, v := range db.PrimaryMap {
+ if cmp(k) {
+ matches <- SearchMatch{Matched: k, AID: v}
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+
+ for _, a := range db.LanguageMap {
+ wg.Add(1)
+ go searchFunc(wg, match, a, cmp)
+ }
+ go func() { wg.Wait(); close(match) }()
+
+ for m := range match {
+ matches <- m
+ }
+ close(matches)
+ }()
+ return matches
+}
+
+func (db *TitlesDatabase) doSearchN(cmp func(string) bool, n int) (matches SearchMatches) {
+ if n == 0 {
+ return nil
+ }
+
+ ch := db.multiSearch(cmp)
+ if n > 0 {
+ matches = make(SearchMatches, 0, n)
+ for m := range ch {
+ matches = append(matches, m)
+ if len(matches) == n {
+ go func() {
+ for _ = range ch {
+ // drain channel
+ }
+ }()
+ return matches[:n]
+ }
+ }
+ } else {
+ for m := range ch {
+ matches = append(matches, m)
+ }
+ }
+ return
+}
+
+func firstMatch(matches SearchMatches) (m SearchMatch) {
+ if len(matches) > 0 {
+ m = matches[0]
+ }
+ return
+}
+
+func (db *TitlesDatabase) doSearch1(cmp func(string) bool) (m SearchMatch) {
+ return firstMatch(db.doSearchN(cmp, 1))
+}
--- /dev/null
+package titles
+
+import (
+ "sort"
+)
+
+func (cmp ResultComparer) Sort(res Results) {
+ sorter := &resultSorter{
+ res: res,
+ by: cmp,
+ }
+ sort.Sort(sorter)
+}
+
+type resultSorter struct {
+ by ResultComparer
+ res Results
+}
+
+func (f *resultSorter) Len() int {
+ return len(f.res)
+}
+
+func (f *resultSorter) Less(i, j int) bool {
+ return f.by(&f.res[i], &f.res[j])
+}
+
+func (f *resultSorter) Swap(i, j int) {
+ f.res[i], f.res[j] = f.res[j], f.res[i]
+}
--- /dev/null
+package titles
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ DataDumpURL = "http://anidb.net/api/anime-titles.dat.gz"
+)
+
+type Name struct {
+ Language string
+ Title string
+}
+
+type Anime struct {
+ AID int
+ PrimaryTitle string
+
+ OfficialNames map[string][]Name
+ Synonyms map[string][]Name
+ ShortNames map[string][]Name
+}
+
+type TitleMap struct {
+ Language string
+
+ OfficialMap map[string]int
+ SynonymMap map[string]int
+ ShortMap map[string]int
+}
+
+type TitlesDatabase struct {
+ sync.RWMutex
+ UpdateTime time.Time
+ Languages []string
+
+ LanguageMap map[string]*TitleMap
+ PrimaryMap map[string]int
+
+ AnimeMap map[int]*Anime
+}
+
+var createdRegexp = regexp.MustCompile(`^# created: (.*)$`)
+
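+// LoadDB parses an anime-titles.dat dump (optionally gzip-compressed, as
+// distributed at DataDumpURL) and merges its contents into the database.
+// Data lines have the form
+//
+//	<aid>|<type>|<language>|<title>
+//
+// where type 1 is the primary title, 2 a synonym, 3 a short title and 4 an
+// official title.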
+func (db *TitlesDatabase) LoadDB(r io.ReadCloser) {
+ db.Lock()
+ defer db.Unlock()
+
+ all, _ := ioutil.ReadAll(r)
+ r.Close()
+
+ var rd io.Reader
+ if gz, err := gzip.NewReader(bytes.NewReader(all)); err == nil {
+ defer gz.Close()
+ rd = gz
+ } else {
+ rd = bytes.NewReader(all)
+ }
+ sc := bufio.NewScanner(rd)
+
+ if db.PrimaryMap == nil {
+ db.PrimaryMap = map[string]int{}
+ }
+ if db.LanguageMap == nil {
+ db.LanguageMap = map[string]*TitleMap{}
+ }
+ if db.AnimeMap == nil {
+ db.AnimeMap = map[int]*Anime{}
+ }
+
+ allLangs := map[string]struct{}{}
+ for sc.Scan() {
+ s := sc.Text()
+
+		if len(s) == 0 {
+			continue
+		}
+		if s[0] == '#' {
+ cr := createdRegexp.FindStringSubmatch(s)
+
+ if len(cr) > 1 && cr[1] != "" {
+ db.UpdateTime, _ = time.Parse(time.ANSIC, cr[1])
+ }
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ if len(parts) < 4 {
+ continue
+ }
+
+ aid, _ := strconv.ParseInt(parts[0], 10, 32)
+ typ, _ := strconv.ParseInt(parts[1], 10, 8)
+
+ if _, ok := db.AnimeMap[int(aid)]; !ok {
+ db.AnimeMap[int(aid)] = &Anime{
+ AID: int(aid),
+ OfficialNames: map[string][]Name{},
+ Synonyms: map[string][]Name{},
+ ShortNames: map[string][]Name{},
+ }
+ }
+
+ lang, title := parts[2], parts[3]
+ allLangs[lang] = struct{}{}
+
+ switch typ {
+ case 1: // primary
+ db.PrimaryMap[title] = int(aid)
+
+ db.AnimeMap[int(aid)].PrimaryTitle = strings.Replace(title, "`", "'", -1)
+ case 2: // synonym
+ lm, ok := db.LanguageMap[lang]
+ if !ok {
+ lm = db.makeLangMap(lang)
+ }
+ lm.SynonymMap[title] = int(aid)
+
+ db.AnimeMap[int(aid)].Synonyms[lang] = append(db.AnimeMap[int(aid)].Synonyms[lang],
+ Name{Language: lang, Title: strings.Replace(title, "`", "'", -1)})
+ case 3: // short
+ lm, ok := db.LanguageMap[lang]
+ if !ok {
+ lm = db.makeLangMap(lang)
+ }
+ lm.ShortMap[title] = int(aid)
+
+			db.AnimeMap[int(aid)].ShortNames[lang] = append(db.AnimeMap[int(aid)].ShortNames[lang],
+ Name{Language: lang, Title: strings.Replace(title, "`", "'", -1)})
+ case 4: // official
+ lm, ok := db.LanguageMap[lang]
+ if !ok {
+ lm = db.makeLangMap(lang)
+ }
+ lm.OfficialMap[title] = int(aid)
+
+			db.AnimeMap[int(aid)].OfficialNames[lang] = append(db.AnimeMap[int(aid)].OfficialNames[lang],
+ Name{Language: lang, Title: strings.Replace(title, "`", "'", -1)})
+ }
+ }
+ langs := make([]string, 0, len(allLangs))
+ for k, _ := range allLangs {
+ langs = append(langs, k)
+ }
+ sort.Strings(langs)
+ db.Languages = langs
+}
+
+func (db *TitlesDatabase) makeLangMap(lang string) *TitleMap {
+ tm := &TitleMap{
+ Language: lang,
+ OfficialMap: map[string]int{},
+ SynonymMap: map[string]int{},
+ ShortMap: map[string]int{},
+ }
+ db.LanguageMap[lang] = tm
+ return tm
+}
--- /dev/null
+package udp
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/md5"
+)
+
+// Yes, AniDB works in ECB mode
+type ecbState struct {
+ udpKey string
+ aes cipher.Block
+}
+
+func newECBState(udpKey string, salt []byte) *ecbState {
+ ecb := &ecbState{udpKey: udpKey}
+ ecb.Init(salt)
+ return ecb
+}
+
+func (ecb *ecbState) Init(salt []byte) {
+ h := md5.New()
+ h.Write([]byte(ecb.udpKey))
+ h.Write(salt)
+
+ key := h.Sum(nil)
+
+ ecb.aes, _ = aes.NewCipher(key)
+}
+
+func (ecb *ecbState) BlockSize() int {
+ return aes.BlockSize
+}
+
+func (ecb *ecbState) Encrypt(p []byte) (c []byte) {
+ if ecb == nil {
+ return p
+ }
+
+ padded := pkcs7Pad(p, aes.BlockSize)
+	// the ciphertext buffer needs its full length up front; each block is written in place
+	c = make([]byte, len(padded))
+
+ for i := 0; i < len(padded); i += aes.BlockSize {
+ ecb.aes.Encrypt(c[i:i+aes.BlockSize], padded[i:i+aes.BlockSize])
+ }
+ return c
+}
+
+func (ecb *ecbState) Decrypt(c []byte) (p []byte) {
+ if ecb == nil {
+ return c
+ }
+
+	// allocate the plaintext buffer; each block is decrypted in place
+	p = make([]byte, len(c))
+	for i := 0; i < len(c); i += aes.BlockSize {
+		ecb.aes.Decrypt(p[i:i+aes.BlockSize], c[i:i+aes.BlockSize])
+	}
+ return pkcs7Unpad(p, aes.BlockSize)
+}
+
+// examples for a block size of 4:
+//	"almost1" -> "almost1\x01"
+//	"bytes"   -> "bytes\x03\x03\x03"
+//	"byte"    -> "byte\x04\x04\x04\x04"
+func pkcs7Pad(b []byte, blockSize byte) (padded []byte) {
+ ps := int(blockSize) - len(b)%int(blockSize)
+ padded = make([]byte, 0, len(b)+ps)
+ padded = append(padded, b...)
+
+ for i := 0; i < ps; i++ {
+ padded = append(padded, byte(ps))
+ }
+ return padded
+}
+
+func pkcs7Unpad(b []byte, blockSize byte) (unpadded []byte) {
+	if len(b) == 0 {
+		return b
+	}
+	ps := b[len(b)-1]
+ if ps > blockSize {
+ return b
+ }
+ padding := b[len(b)-int(ps):]
+ for _, pb := range padding {
+ if pb != ps {
+ return b
+ }
+ }
+ return b[:len(b)-int(ps)]
+}
--- /dev/null
+package udp
+
+import (
+ "testing"
+)
+
+func TestPKCS7(T *testing.T) {
+ blockSize := byte(4)
+ vec := [][2]string{
+ [2]string{"testing", "testing\x01"},
+ [2]string{"byte", "byte\x04\x04\x04\x04"},
+ [2]string{"stuff", "stuff\x03\x03\x03"},
+ }
+
+ for i, v := range vec {
+ p := string(pkcs7Pad([]byte(v[0]), blockSize))
+ if p != v[1] {
+ T.Errorf("Vector #%d: expected %q, got %q", i, v[1], p)
+ }
+ u := string(pkcs7Unpad([]byte(p), blockSize))
+ if u != v[0] {
+ T.Errorf("Vector #%d: expected %q, got %q", i, v[0], u)
+ }
+ }
+}
--- /dev/null
+package udp
+
+import (
+	"strings"
+)
+
+// Auth authenticates the supplied user with the supplied password. Blocks until we have a reply.
+// Needed before almost any other API command can be used.
+//
+// If the udpKey is not "", then the connection will be encrypted, but the protocol's
+// encryption uses the VERY weak ECB mode.
+//
+// http://wiki.anidb.net/w/UDP_API_Definition#AUTH:_Authing_to_the_AnimeDB
+//
+// http://wiki.anidb.net/w/UDP_API_Definition#ENCRYPT:_Start_Encrypted_Session
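+//
+// A hedged usage sketch:
+//
+//	a := NewAniDBUDP()
+//	if err := a.Auth("user", "password", ""); err != nil {
+//		// login failed
+//	}
+//	defer a.Logout()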
+func (a *AniDBUDP) Auth(user, password, udpKey string) (err error) {
+ if a.session != "" {
+ if err = (<-a.Uptime()).Error(); err == nil {
+ return nil
+ }
+ }
+
+ a.session = ""
+	if udpKey != "" {
+ if err = a.encrypt(user, udpKey); err != nil {
+ return err
+ }
+ }
+	r := <-a.SendRecv("AUTH", ParamMap{
+ "user": user,
+ "pass": password,
+ "protover": 3,
+ "client": "goanidbudp",
+ "clientver": 1,
+ "nat": 1,
+ "comp": 1,
+ "enc": "UTF-8",
+ })
+ switch r.Code() {
+ case 200, 201:
+ f := strings.Fields(r.Text())
+ a.session = f[0]
+ }
+ return r.Error()
+}
+
+// Logout ends the API session. Blocks until we have confirmation.
+//
+// http://wiki.anidb.net/w/UDP_API_Definition#LOGOUT:_Logout
+func (a *AniDBUDP) Logout() (err error) {
+	r := <-a.SendRecv("LOGOUT", ParamMap{})
+ a.session = ""
+ return r.Error()
+}
+
+func (a *AniDBUDP) encrypt(user, udpKey string) (err error) {
+	if reply := <-a.SendRecv("ENCRYPT", ParamMap{"user": user, "type": 1}); reply.Error() != nil {
+ return reply.Error()
+ } else {
+ switch reply.Code() {
+ case 209:
+ salt := []byte(strings.Fields(reply.Text())[0])
+
+ // Yes, AniDB works in ECB mode
+ a.ecb = newECBState(udpKey, salt)
+ }
+ return reply.Error()
+ }
+}
--- /dev/null
+// Package udp implements a low-level AniDB UDP API client library.
+//
+// Implements the commands essential to setting up and tearing down an API connection,
+// as well as an asynchronous layer. Throttles sends internally according to API spec.
+//
+// This library doesn't implement caching; beware since aggressive caching is an
+// implementation requirement. Not doing so can get you banned.
+package udp
+
+import (
+	"compress/zlib"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+ AniDBUDPServer = "api.anidb.net"
+ AniDBUDPPort = 9000
+)
+
+type AniDBUDP struct {
+ KeepAliveInterval time.Duration // Interval between keep-alive packets; only sent when PUSH notifications are enabled (default: 20 minutes)
+ Timeout time.Duration // The time to wait before a packet is considered lost (default: 45 seconds)
+ Notifications chan APIReply // Channel where PUSH notifications are sent to
+
+ session string
+
+ conn *net.UDPConn
+ ecb *ecbState
+
+ counter uint16
+ ctrLock sync.Mutex
+
+ tagRouter map[string]chan APIReply
+ routerLock sync.RWMutex
+
+ sendCh chan packet
+
+ breakRecv chan bool
+ breakSend chan bool
+
+ // notifyState *notifyState
+ pingTimer *time.Timer
+}
+
+// NewAniDBUDP creates and initializes an AniDBUDP client.
+func NewAniDBUDP() *AniDBUDP {
+ c := &AniDBUDP{
+ KeepAliveInterval: 20 * time.Minute,
+ Timeout: 45 * time.Second,
+ Notifications: make(chan APIReply, 5),
+ tagRouter: make(map[string]chan APIReply),
+ }
+ return c
+}
+
+// ParamMap is a key-value list of parameters.
+type ParamMap map[string]interface{}
+
+// Returns a query-like string representation of the ParamMap
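+// (keys are emitted in sorted order, so e.g. ParamMap{"s": "xyz", "aid": 1}
+// produces "aid=1&s=xyz").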
+func (m ParamMap) String() string {
+ keys := make([]string, 0, len(m))
+ for k, _ := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ parts := make([]string, 0, len(m))
+ for _, k := range keys {
+ parts = append(parts, strings.Join([]string{k, fmt.Sprint((m)[k])}, "="))
+ }
+ return strings.Join(parts, "&")
+}
+
+// SendRecv sends the given command with the given parameters to the AniDB UDP API server.
+//
+// Returns a channel through which the eventual reply is sent.
+//
+// See http://wiki.anidb.net/w/UDP_API_Definition for the defined commands.
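+//
+// A hedged usage sketch:
+//
+//	reply := <-a.SendRecv("UPTIME", ParamMap{})
+//	if reply.Error() != nil {
+//		// API or transport error
+//	}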
+func (a *AniDBUDP) SendRecv(command string, args ParamMap) <-chan APIReply {
+ a.ctrLock.Lock()
+ tag := fmt.Sprintf("T%d", a.counter)
+ a.counter++
+ a.ctrLock.Unlock()
+
+ args["tag"] = tag
+ if a.session != "" {
+ args["s"] = a.session
+ }
+
+	ch := make(chan APIReply, 1)
+
+	if err := a.dial(); err != nil {
+		ch <- newErrorWrapper(err)
+		close(ch)
+		return ch
+	}
+
+ a.routerLock.Lock()
+ a.tagRouter[tag] = ch
+ a.routerLock.Unlock()
+
+ reply := make(chan APIReply, 1)
+ go func() {
+ <-a.send(command, args)
+ timeout := time.After(a.Timeout)
+
+ select {
+ case <-timeout:
+ a.routerLock.Lock()
+ delete(a.tagRouter, tag)
+ a.routerLock.Unlock()
+ close(ch)
+
+ reply <- TimeoutError
+ close(reply)
+
+ log.Println("!!! Timeout")
+ case r := <-ch:
+ a.routerLock.Lock()
+ delete(a.tagRouter, tag)
+ a.routerLock.Unlock()
+ close(ch)
+
+ reply <- r
+ close(reply)
+ }
+ }()
+ return reply
+}
+
+var laddr, _ = net.ResolveUDPAddr("udp4", "0.0.0.0:0")
+
+func (a *AniDBUDP) dial() (err error) {
+	if a.conn != nil {
+		return nil
+	}
+
+	srv := fmt.Sprintf("%s:%d", AniDBUDPServer, AniDBUDPPort)
+	raddr, err := net.ResolveUDPAddr("udp4", srv)
+	if err != nil {
+		return err
+	}
+	if a.conn, err = net.DialUDP("udp4", laddr, raddr); err != nil {
+		return err
+	}
+
+	if a.breakSend != nil {
+		a.breakSend <- true
+		<-a.breakSend
+	} else {
+		a.breakSend = make(chan bool)
+	}
+	a.sendCh = make(chan packet, 10)
+	go a.sendLoop()
+
+	if a.breakRecv != nil {
+		a.breakRecv <- true
+		<-a.breakRecv
+	} else {
+		a.breakRecv = make(chan bool)
+	}
+	go a.recvLoop()
+
+	return nil
+}
+
+func (a *AniDBUDP) send(command string, args ParamMap) chan bool {
+ str := command
+ arg := args.String()
+ if len(arg) > 0 {
+ str = strings.Join([]string{command, arg}, " ")
+ }
+ log.Println(">>>", str)
+
+	p := makePacket([]byte(str), a.ecb)
+
+	// sendPacket allocates and returns the channel that is signalled once the
+	// packet has actually been written to the socket by the send queue
+	return sendPacket(p, a.sendCh)
+}
+
+type packet struct {
+ b []byte
+ err error
+ sent chan bool
+}
+
+func (a *AniDBUDP) sendLoop() {
+ for {
+ select {
+ case <-a.breakSend:
+ a.breakSend <- true
+ return
+ case pkt := <-a.sendCh:
+ a.conn.Write(pkt.b)
+
+ // send twice: once for confirming with the queue,
+ // again for timeout calculations
+ for i := 0; i < 2; i++ {
+ pkt.sent <- true
+ }
+ }
+ }
+}
+
+func (a *AniDBUDP) recvLoop() {
+ pkt := make(chan packet, 1)
+ brk := make(chan bool)
+ go func() {
+ for {
+ select {
+ case <-brk:
+ brk <- true
+ return
+ default:
+ b, err := getPacket(a.conn, a.ecb)
+ pkt <- packet{b: b, err: err}
+ }
+ }
+ }()
+
+ var pingTimer <-chan time.Time
+
+ for {
+ if a.pingTimer != nil {
+ pingTimer = a.pingTimer.C
+ }
+
+ select {
+ case <-a.breakRecv:
+ brk <- true
+ <-brk
+ a.breakRecv <- true
+ return
+ case <-pingTimer:
+ go func() {
+ if a.KeepAliveInterval >= 30*time.Minute {
+ if (<-a.Uptime()).Error() != nil {
+ return
+ }
+ } else if (<-a.Ping()).Error() != nil {
+ return
+ }
+ a.pingTimer.Reset(a.KeepAliveInterval)
+ }()
+ case p := <-pkt:
+ b, err := p.b, p.err
+
+ if err != nil && err != io.EOF && err != zlib.ErrChecksum {
+ // can UDP recv even raise other errors?
+ panic("UDP recv: " + err.Error())
+ }
+
+ if r := newGenericReply(b); r != nil {
+ if a.pingTimer != nil {
+ a.pingTimer.Reset(a.KeepAliveInterval)
+ }
+
+ if err == zlib.ErrChecksum {
+ r.truncated = true
+ }
+
+ a.routerLock.RLock()
+ if ch, ok := a.tagRouter[r.Tag()]; ok {
+
+ log.Println("<<<", string(b))
+ ch <- r
+ } else {
+ c := r.Code()
+ if c >= 720 && c < 799 {
+ // notices that need PUSHACK
+ id := strings.Fields(r.Text())[0]
+						a.send("PUSHACK", ParamMap{"nid": id})
+
+ a.Notifications <- r
+ } else if c == 799 {
+ // notice that doesn't need PUSHACK
+ a.Notifications <- r
+ } else if c == 270 {
+ // PUSH enabled
+ if a.pingTimer == nil {
+ a.pingTimer = time.NewTimer(a.KeepAliveInterval)
+ }
+ } else if c == 370 {
+ // PUSH disabled
+ a.pingTimer = nil
+ } else if c == 701 || c == 702 {
+ // PUSHACK ACK, no need to route
+ } else if c == 281 || c == 282 || c == 381 || c == 382 {
+ // NOTIFYACK reply, ignore
+ } else {
+ // untagged error, broadcast to all
+ log.Println("<!<", string(b))
+ for _, ch := range a.tagRouter {
+ ch <- r
+ }
+ }
+ }
+ a.routerLock.RUnlock()
+ }
+ }
+ }
+}
--- /dev/null
+package udp
+
+import (
+	"strconv"
+	"time"
+)
+
+type UptimeReply struct {
+ APIReply
+ Uptime time.Duration
+}
+
+// Uptime retrieves the server's uptime. This is the recommended way to verify
+// that a session is still valid.
+//
+// Returns a channel through which the eventual response will be sent.
+//
+// http://wiki.anidb.net/w/UDP_API_Definition#UPTIME:_Retrieve_Server_Uptime
+func (a *AniDBUDP) Uptime() <-chan UptimeReply {
+	ch := make(chan UptimeReply, 2)
+	go func() {
+		reply := <-a.SendRecv("UPTIME", ParamMap{})
+
+ r := UptimeReply{APIReply: reply}
+ if r.Error() == nil {
+ uptime, _ := strconv.ParseInt(reply.Lines()[1], 10, 32)
+ r.Uptime = time.Duration(uptime) * time.Millisecond
+ }
+ ch <- r
+ close(ch)
+ }()
+ return ch
+}
+
+type PingReply struct {
+ APIReply
+ Port uint16 // This client's local UDP port
+}
+
+// Ping is a simple echo command, and the recommended way to verify that the
+// server is alive and responding. Does not require authentication.
+//
+// Returns a channel through which the eventual response will be sent.
+//
+// http://wiki.anidb.net/w/UDP_API_Definition#PING:_Ping_Command
+func (a *AniDBUDP) Ping() <-chan PingReply {
+	ch := make(chan PingReply, 2)
+	go func() {
+		reply := <-a.SendRecv("PING", ParamMap{"nat": 1})
+
+ r := PingReply{APIReply: reply}
+ if r.Error() == nil {
+ port, _ := strconv.ParseUint(reply.Lines()[1], 10, 16)
+ r.Port = uint16(port)
+ }
+ ch <- r
+ close(ch)
+ }()
+ return ch
+}
--- /dev/null
+package udp
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io"
+ "io/ioutil"
+ "net"
+)
+
+func getPacket(conn *net.UDPConn, ecb *ecbState) (buf []byte, err error) {
+ buf = make([]byte, 1500)
+	n, err := conn.Read(buf)
+	if err != nil {
+		return buf[:n], err
+	}
+
+	buf = ecb.Decrypt(buf[:n])
+
+	// a payload starting with two zero bytes is zlib-deflated
+	if len(buf) > 2 && buf[0] == 0 && buf[1] == 0 {
+ def, _ := zlib.NewReader(bytes.NewReader(buf[2:]))
+ t, e := ioutil.ReadAll(def)
+ def.Close()
+ buf = t
+ if e != nil && e != io.EOF {
+ err = e
+ }
+ }
+ return buf, err
+}
+
+func makePacket(buf []byte, ecb *ecbState) packet {
+ return packet{b: ecb.Encrypt(buf)}
+}
--- /dev/null
+package udp
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type apiError struct {
+ Code int
+ Desc string
+}
+
+func (err *apiError) Error() string {
+ return fmt.Sprint(err.Code, err.Desc)
+}
+
+// The interface for all UDP API replies.
+//
+// The user should call Error() to verify if the API call completed successfully.
+type APIReply interface {
+ // An opaque string used as identifying tag.
+ Tag() string
+
+ // The integer code for the reply.
+ Code() int
+
+ // The description for the reply (first line minus code).
+ Text() string
+
+ // Slice with all lines of the reply.
+ Lines() []string
+
+ // Indicates whether the network code detected truncation.
+ Truncated() bool
+
+ // Returns the underlying error, if any.
+ Error() error
+}
+
+type errorWrapper struct {
+ err error
+}
+
+func (_ *errorWrapper) Tag() string {
+ return ""
+}
+
+func (w *errorWrapper) Code() int {
+	switch e := w.err.(type) {
+	case *apiError:
+ return e.Code
+ default:
+ return 999
+ }
+}
+
+func (w *errorWrapper) Text() string {
+ switch e := w.err.(type) {
+	case *apiError:
+ return e.Desc
+ default:
+ return e.Error()
+ }
+}
+
+func (w *errorWrapper) Lines() []string {
+ return []string{w.Text()}
+}
+
+func (_ *errorWrapper) Truncated() bool {
+ return false
+}
+
+func (w *errorWrapper) Error() error {
+ return w.err
+}
+
+func newErrorWrapper(err error) APIReply {
+ return &errorWrapper{
+ err: err,
+ }
+}
+
+type genericReply struct {
+ raw []byte
+ text string
+ lines []string
+ tag string
+ code int
+ truncated bool
+ err error
+}
+
+var timeoutResponse *genericReply
+var TimeoutError APIReply // API response "604 TIMEOUT - DELAY AND RESUBMIT", also generated on client-side timeouts
+
+// set in init() to avoid an initialization cycle (newGenericReply refers back to timeoutResponse)
+func init() {
+	timeoutResponse = newGenericReply([]byte("604 TIMEOUT - DELAY AND RESUBMIT"))
+	TimeoutError = timeoutResponse
+}
+
+func newGenericReply(raw []byte) (r *genericReply) {
+ str := string(raw)
+ lines := strings.Split(str, "\n")
+ parts := strings.Fields(lines[0])
+
+ // invalid packet
+ if len(parts) < 1 {
+ return nil
+ }
+
+ // Drop lines that are only whitespace
+ for len(lines) > 0 && strings.TrimSpace(lines[len(lines)-1]) == "" {
+ lines = lines[:len(lines)-1]
+ }
+
+ // XXX: REQUIRES that the tag is not parsable as a base 10 number.
+ // Just prepending any sent tag with 'T' ought to be enough
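+	// e.g. a tagged reply such as "T1 300 PONG" parses to tag "T1", code 300
+	// and text "PONG", while an untagged line has an empty tag.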
+ tag := ""
+ text := ""
+ code, err := strconv.ParseInt(parts[0], 10, 16)
+ if err != nil {
+		tag = parts[0]
+		if len(parts) > 1 {
+			code, err = strconv.ParseInt(parts[1], 10, 16)
+		}
+
+ if len(parts) > 2 {
+ text = strings.Join(parts[2:], " ")
+ }
+ } else if len(parts) > 1 {
+ text = strings.Join(parts[1:], " ")
+ }
+
+ // Make sure server-side timeouts are comparable against TimeoutError
+	if timeoutResponse != nil && code == timeoutResponse.code {
+ return timeoutResponse
+ }
+
+ var e *apiError = nil
+ // 720-799 range is for notifications
+ // 799 is an API server shutdown notice, so I guess it's okay to be an error
+ if code < 200 || (code > 299 && code < 720) || code > 798 {
+ e = &apiError{Code: int(code), Desc: text}
+ }
+
+ return &genericReply{
+ tag: tag,
+ code: int(code),
+ text: text,
+ lines: lines,
+ err: e,
+ }
+}
+
+func (r *genericReply) Tag() string {
+ return r.tag
+}
+
+func (r *genericReply) Code() int {
+ return r.code
+}
+
+func (r *genericReply) Text() string {
+ return r.text
+}
+
+func (r *genericReply) Lines() []string {
+ return r.lines
+}
+
+func (r *genericReply) Truncated() bool {
+ return r.truncated
+}
+
+func (r *genericReply) Error() error {
+ return r.err
+}
--- /dev/null
+package udp
+
+import (
+ "time"
+)
+
+type enqueuedPacket struct {
+ packet
+ queue chan packet
+}
+
+type sendQueueState struct {
+ enqueue chan enqueuedPacket
+}
+
+var globalQueue sendQueueState
+
+func init() {
+ globalQueue = sendQueueState{
+ enqueue: make(chan enqueuedPacket, 10),
+ }
+ go globalQueue.sendQueueDispatch()
+}
+
+const (
+ throttleMinDuration = 2 * time.Second
+ throttleMaxDuration = 4 * time.Second
+ throttleIncFactor = 1.1
+ throttleDecFactor = 0.9
+ throttleDecInterval = 10 * time.Second
+)
+
+func sendPacket(p packet, c chan packet) chan bool {
+	p.sent = make(chan bool, 2)
+	globalQueue.enqueue <- enqueuedPacket{packet: p, queue: c}
+	return p.sent
+}
+
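+// sendQueueDispatch serializes all outgoing packets through a single loop:
+// it waits at least currentThrottle between sends, growing the delay by
+// throttleIncFactor (up to throttleMaxDuration) while traffic keeps flowing
+// and shrinking it by throttleDecFactor every throttleDecInterval of idle
+// time, down to throttleMinDuration.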
+func (gq *sendQueueState) sendQueueDispatch() {
+ pkt := (*enqueuedPacket)(nil)
+ queue := make([]enqueuedPacket, 0)
+
+ nextTimer := time.NewTimer(0)
+ decTimer := time.NewTimer(0)
+
+ currentThrottle := throttleMinDuration
+
+ for {
+ if pkt == nil && len(queue) > 0 {
+ pkt = &queue[0]
+ queue = queue[1:]
+ }
+
+ nextCh := nextTimer.C
+ decCh := decTimer.C
+
+ if pkt == nil {
+ nextCh = nil
+ }
+
+ select {
+ case p := <-gq.enqueue:
+ queue = append(queue, p)
+ case <-nextCh:
+ pkt.queue <- pkt.packet
+ <-pkt.packet.sent
+
+ pkt = nil
+
+ currentThrottle = time.Duration(float64(currentThrottle) * throttleIncFactor)
+ if currentThrottle > throttleMaxDuration {
+ currentThrottle = throttleMaxDuration
+ }
+ nextTimer.Reset(currentThrottle)
+
+ decTimer.Reset(throttleDecInterval)
+ case <-decCh:
+ currentThrottle = time.Duration(float64(currentThrottle) * throttleDecFactor)
+ if currentThrottle < throttleMinDuration {
+ currentThrottle = throttleMinDuration
+ } else {
+ decTimer.Reset(throttleDecInterval)
+ }
+ }
+ }
+}