Checking in vendor folder for ease of using go get.

Renan DelValle 2018-10-23 23:32:59 -07:00
parent 7a1251853b
commit cdb4b5a1d0
3554 changed files with 1270116 additions and 0 deletions

162
vendor/golang.org/x/text/cases/cases.go generated vendored Normal file

@@ -0,0 +1,162 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go gen_trieval.go
// Package cases provides general and language-specific case mappers.
package cases // import "golang.org/x/text/cases"
import (
"golang.org/x/text/language"
"golang.org/x/text/transform"
)
// References:
// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18.
// - http://www.unicode.org/reports/tr29/
// - http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt
// - http://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt
// - http://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt
// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt
// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt
// - http://userguide.icu-project.org/transforms/casemappings
// TODO:
// - Case folding
// - Wide and Narrow?
// - Segmenter option for title casing.
// - ASCII fast paths
// - Encode Soft-Dotted property within trie somehow.
// A Caser transforms given input to a certain case. It implements
// transform.Transformer.
//
// A Caser may be stateful and should therefore not be shared between
// goroutines.
type Caser struct {
t transform.SpanningTransformer
}
// Bytes returns a new byte slice with the result of converting b to the case
// form implemented by c.
func (c Caser) Bytes(b []byte) []byte {
b, _, _ = transform.Bytes(c.t, b)
return b
}
// String returns a string with the result of transforming s to the case form
// implemented by c.
func (c Caser) String(s string) string {
s, _, _ = transform.String(c.t, s)
return s
}
// Reset resets the Caser to be reused for new input after a previous call to
// Transform.
func (c Caser) Reset() { c.t.Reset() }
// Transform implements the transform.Transformer interface and transforms the
// given input to the case form implemented by c.
func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return c.t.Transform(dst, src, atEOF)
}
// Span implements the transform.SpanningTransformer interface.
func (c Caser) Span(src []byte, atEOF bool) (n int, err error) {
return c.t.Span(src, atEOF)
}
// Upper returns a Caser for language-specific uppercasing.
func Upper(t language.Tag, opts ...Option) Caser {
return Caser{makeUpper(t, getOpts(opts...))}
}
// Lower returns a Caser for language-specific lowercasing.
func Lower(t language.Tag, opts ...Option) Caser {
return Caser{makeLower(t, getOpts(opts...))}
}
// Title returns a Caser for language-specific title casing. It uses an
// approximation of the default Unicode Word Break algorithm.
func Title(t language.Tag, opts ...Option) Caser {
return Caser{makeTitle(t, getOpts(opts...))}
}
// Fold returns a Caser that implements Unicode case folding. The returned Caser
// is stateless and safe to use concurrently by multiple goroutines.
//
// Case folding does not normalize the input and may not preserve a normal form.
// Use the collate or search package for more convenient and linguistically
// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons
// where security aspects are a concern.
func Fold(opts ...Option) Caser {
return Caser{makeFold(getOpts(opts...))}
}
// An Option is used to modify the behavior of a Caser.
type Option func(o options) options
// TODO: consider having these options take a boolean as well, like HandleFinalSigma.
// The advantage of using this approach is that other providers of a lower-case
// algorithm could set different defaults by prefixing a user-provided slice
// of options with their own. This is handy, for instance, for the precis
// package which would override the default to not handle the Greek final sigma.
var (
// NoLower disables the lowercasing of non-leading letters for a title
// caser.
NoLower Option = noLower
// Compact omits mappings in case folding for characters that would grow the
// input. (Unimplemented.)
Compact Option = compact
)
// TODO: option to preserve a normal form, if applicable?
type options struct {
noLower bool
simple bool
// TODO: segmenter, max ignorable, alternative versions, etc.
ignoreFinalSigma bool
}
func getOpts(o ...Option) (res options) {
for _, f := range o {
res = f(res)
}
return
}
func noLower(o options) options {
o.noLower = true
return o
}
func compact(o options) options {
o.simple = true
return o
}
// HandleFinalSigma specifies whether the special handling of Greek final sigma
// should be enabled. Unicode prescribes handling the Greek final sigma for all
// locales, but standards like IDNA and PRECIS override this default.
func HandleFinalSigma(enable bool) Option {
if enable {
return handleFinalSigma
}
return ignoreFinalSigma
}
func ignoreFinalSigma(o options) options {
o.ignoreFinalSigma = true
return o
}
func handleFinalSigma(o options) options {
o.ignoreFinalSigma = false
return o
}
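
For orientation, a brief usage sketch of the API defined above (the constructors and options are those declared in this file; output strings are not asserted here, since they depend on the tables and the language tag):

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// Editorial sketch; not part of the vendored file.
	// Title-case without lowercasing the non-leading letters of each word.
	t := cases.Title(language.Und, cases.NoLower)
	// Lowercase with Greek final-sigma handling disabled, as PRECIS does.
	l := cases.Lower(language.Und, cases.HandleFinalSigma(false))
	// Language-independent Unicode case folding; safe for concurrent use.
	f := cases.Fold()

	fmt.Println(t.String("hello world"))
	fmt.Println(l.String("ΟΔΟΣ"))
	fmt.Println(f.String("Σίσυφος"))
}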

376
vendor/golang.org/x/text/cases/context.go generated vendored Normal file

@@ -0,0 +1,376 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
import "golang.org/x/text/transform"
// A context is used for iterating over source bytes, fetching case info and
// writing to a destination buffer.
//
// Casing operations may need more than one rune of context to decide how a rune
// should be cased. Casing implementations should call checkpoint on context
// whenever it is known to be safe to return the runes processed so far.
//
// It is recommended for implementations to not allow for more than 30 case
// ignorables as lookahead (analogous to the limit in norm) and to use state if
// unbounded lookahead is needed for cased runes.
type context struct {
dst, src []byte
atEOF bool
pDst int // pDst points past the last written rune in dst.
pSrc int // pSrc points to the start of the currently scanned rune.
// checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc.
nDst, nSrc int
err error
sz int // size of current rune
info info // case information of currently scanned rune
// State preserved across calls to Transform.
isMidWord bool // false if next cased letter needs to be title-cased.
}
func (c *context) Reset() {
c.isMidWord = false
}
// ret returns the return values for the Transform method. It checks whether
// there were insufficient bytes in src to complete and introduces an error
// accordingly, if necessary.
func (c *context) ret() (nDst, nSrc int, err error) {
if c.err != nil || c.nSrc == len(c.src) {
return c.nDst, c.nSrc, c.err
}
// This point is only reached by mappers if there was no short destination
// buffer. This means that the source buffer was exhausted and that c.sz was
// set to 0 by next.
if c.atEOF && c.pSrc == len(c.src) {
return c.pDst, c.pSrc, nil
}
return c.nDst, c.nSrc, transform.ErrShortSrc
}
// retSpan returns the return values for the Span method. It checks whether
// there were insufficient bytes in src to complete and introduces an error
// accordingly, if necessary.
func (c *context) retSpan() (n int, err error) {
_, nSrc, err := c.ret()
return nSrc, err
}
// checkpoint sets the return value buffer points for Transform to the current
// positions.
func (c *context) checkpoint() {
if c.err == nil {
c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz
}
}
// unreadRune causes the last rune read by next to be reread on the next
// invocation of next. unreadRune may be called at most once after each call to next.
func (c *context) unreadRune() {
c.sz = 0
}
func (c *context) next() bool {
c.pSrc += c.sz
if c.pSrc == len(c.src) || c.err != nil {
c.info, c.sz = 0, 0
return false
}
v, sz := trie.lookup(c.src[c.pSrc:])
c.info, c.sz = info(v), sz
if c.sz == 0 {
if c.atEOF {
// A zero size means we have an incomplete rune. If we are atEOF,
// this means it is an illegal rune, which we will consume one
// byte at a time.
c.sz = 1
} else {
c.err = transform.ErrShortSrc
return false
}
}
return true
}
// writeBytes adds bytes to dst.
func (c *context) writeBytes(b []byte) bool {
if len(c.dst)-c.pDst < len(b) {
c.err = transform.ErrShortDst
return false
}
// This loop is faster than using copy.
for _, ch := range b {
c.dst[c.pDst] = ch
c.pDst++
}
return true
}
// writeString writes the given string to dst.
func (c *context) writeString(s string) bool {
if len(c.dst)-c.pDst < len(s) {
c.err = transform.ErrShortDst
return false
}
// This loop is faster than using copy.
for i := 0; i < len(s); i++ {
c.dst[c.pDst] = s[i]
c.pDst++
}
return true
}
// copy writes the current rune to dst.
func (c *context) copy() bool {
return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz])
}
// copyXOR copies the current rune to dst and modifies it by applying the XOR
// pattern of the case info. It is the responsibility of the caller to ensure
// that this is a rune with an XOR pattern defined.
func (c *context) copyXOR() bool {
if !c.copy() {
return false
}
if c.info&xorIndexBit == 0 {
// Fast path for 6-bit XOR pattern, which covers most cases.
c.dst[c.pDst-1] ^= byte(c.info >> xorShift)
} else {
// Interpret XOR bits as an index.
// TODO: test performance for unrolling this loop. Verify that we have
// at least two bytes and at most three.
idx := c.info >> xorShift
for p := c.pDst - 1; ; p-- {
c.dst[p] ^= xorData[idx]
idx--
if xorData[idx] == 0 {
break
}
}
}
return true
}
// hasPrefix returns true if src[pSrc:] starts with the given string.
func (c *context) hasPrefix(s string) bool {
b := c.src[c.pSrc:]
if len(b) < len(s) {
return false
}
for i, c := range b[:len(s)] {
if c != s[i] {
return false
}
}
return true
}
// caseType returns an info with only the case bits, normalized to either
// cLower, cUpper, cTitle or cUncased.
func (c *context) caseType() info {
cm := c.info & 0x7
if cm < 4 {
return cm
}
if cm >= cXORCase {
// xor the last bit of the rune with the case type bits.
b := c.src[c.pSrc+c.sz-1]
return info(b&1) ^ cm&0x3
}
if cm == cIgnorableCased {
return cLower
}
return cUncased
}
// lower writes the lowercase version of the current rune to dst.
func lower(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cLower {
return c.copy()
}
if c.info&exceptionBit == 0 {
return c.copyXOR()
}
e := exceptions[c.info>>exceptionShift:]
offset := 2 + e[0]&lengthMask // size of header + fold string
if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
return c.writeString(e[offset : offset+nLower])
}
return c.copy()
}
func isLower(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cLower {
return true
}
if c.info&exceptionBit == 0 {
c.err = transform.ErrEndOfSpan
return false
}
e := exceptions[c.info>>exceptionShift:]
if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// upper writes the uppercase version of the current rune to dst.
func upper(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cUpper {
return c.copy()
}
if c.info&exceptionBit == 0 {
return c.copyXOR()
}
e := exceptions[c.info>>exceptionShift:]
offset := 2 + e[0]&lengthMask // size of header + fold string
// Get length of first special case mapping.
n := (e[1] >> lengthBits) & lengthMask
if ct == cTitle {
// The first special case mapping is for lower. Set n to the second.
if n == noChange {
n = 0
}
n, e = e[1]&lengthMask, e[n:]
}
if n != noChange {
return c.writeString(e[offset : offset+n])
}
return c.copy()
}
// isUpper reports whether the current rune is in upper case.
func isUpper(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cUpper {
return true
}
if c.info&exceptionBit == 0 {
c.err = transform.ErrEndOfSpan
return false
}
e := exceptions[c.info>>exceptionShift:]
// Get length of first special case mapping.
n := (e[1] >> lengthBits) & lengthMask
if ct == cTitle {
n = e[1] & lengthMask
}
if n != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// title writes the title case version of the current rune to dst.
func title(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cTitle {
return c.copy()
}
if c.info&exceptionBit == 0 {
if ct == cLower {
return c.copyXOR()
}
return c.copy()
}
// Get the exception data.
e := exceptions[c.info>>exceptionShift:]
offset := 2 + e[0]&lengthMask // size of header + fold string
nFirst := (e[1] >> lengthBits) & lengthMask
if nTitle := e[1] & lengthMask; nTitle != noChange {
if nFirst != noChange {
e = e[nFirst:]
}
return c.writeString(e[offset : offset+nTitle])
}
if ct == cLower && nFirst != noChange {
// Use the uppercase version instead.
return c.writeString(e[offset : offset+nFirst])
}
// Already in correct case.
return c.copy()
}
// isTitle reports whether the current rune is in title case.
func isTitle(c *context) bool {
ct := c.caseType()
if c.info&hasMappingMask == 0 || ct == cTitle {
return true
}
if c.info&exceptionBit == 0 {
if ct == cLower {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// Get the exception data.
e := exceptions[c.info>>exceptionShift:]
if nTitle := e[1] & lengthMask; nTitle != noChange {
c.err = transform.ErrEndOfSpan
return false
}
nFirst := (e[1] >> lengthBits) & lengthMask
if ct == cLower && nFirst != noChange {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
// foldFull writes the full case-folded version of the current rune to dst.
func foldFull(c *context) bool {
if c.info&hasMappingMask == 0 {
return c.copy()
}
ct := c.caseType()
if c.info&exceptionBit == 0 {
if ct != cLower || c.info&inverseFoldBit != 0 {
return c.copyXOR()
}
return c.copy()
}
e := exceptions[c.info>>exceptionShift:]
n := e[0] & lengthMask
if n == 0 {
if ct == cLower {
return c.copy()
}
n = (e[1] >> lengthBits) & lengthMask
}
return c.writeString(e[2 : 2+n])
}
// isFoldFull reports whether the current rune is unchanged by full case folding.
func isFoldFull(c *context) bool {
if c.info&hasMappingMask == 0 {
return true
}
ct := c.caseType()
if c.info&exceptionBit == 0 {
if ct != cLower || c.info&inverseFoldBit != 0 {
c.err = transform.ErrEndOfSpan
return false
}
return true
}
e := exceptions[c.info>>exceptionShift:]
n := e[0] & lengthMask
if n == 0 && ct == cLower {
return true
}
c.err = transform.ErrEndOfSpan
return false
}
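
As a minimal sketch of how the iteration protocol above is meant to be driven (mirroring the pattern used by the package's actual mappers, which are not part of this excerpt), a lowercasing Transform could look like this:

// Editorial sketch; not part of context.go.
func lowerTransform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	c := context{dst: dst, src: src, atEOF: atEOF}
	for c.next() {
		lower(&c)      // map the current rune; sets c.err on a short dst
		c.checkpoint() // mark the output written so far as safe to return
	}
	return c.ret() // reports ErrShortSrc or ErrShortDst as appropriate
}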

438
vendor/golang.org/x/text/cases/context_test.go generated vendored Normal file

@@ -0,0 +1,438 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
import (
"strings"
"testing"
"unicode"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/language"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/unicode/rangetable"
)
// The following definitions are taken directly from Chapter 3 of The Unicode
// Standard.
func propCased(r rune) bool {
return propLower(r) || propUpper(r) || unicode.IsTitle(r)
}
func propLower(r rune) bool {
return unicode.IsLower(r) || unicode.Is(unicode.Other_Lowercase, r)
}
func propUpper(r rune) bool {
return unicode.IsUpper(r) || unicode.Is(unicode.Other_Uppercase, r)
}
func propIgnore(r rune) bool {
if unicode.In(r, unicode.Mn, unicode.Me, unicode.Cf, unicode.Lm, unicode.Sk) {
return true
}
return caseIgnorable[r]
}
func hasBreakProp(r rune) bool {
// binary search over ranges
lo := 0
hi := len(breakProp)
for lo < hi {
m := lo + (hi-lo)/2
bp := &breakProp[m]
if bp.lo <= r && r <= bp.hi {
return true
}
if r < bp.lo {
hi = m
} else {
lo = m + 1
}
}
return false
}
func contextFromRune(r rune) *context {
c := context{dst: make([]byte, 128), src: []byte(string(r)), atEOF: true}
c.next()
return &c
}
func TestCaseProperties(t *testing.T) {
if unicode.Version != UnicodeVersion {
// Properties of existing code points may change by Unicode version, so
// we need to skip.
t.Skipf("Skipping as core Unicode version %s different than %s", unicode.Version, UnicodeVersion)
}
assigned := rangetable.Assigned(UnicodeVersion)
coreVersion := rangetable.Assigned(unicode.Version)
for r := rune(0); r <= lastRuneForTesting; r++ {
if !unicode.In(r, assigned) || !unicode.In(r, coreVersion) {
continue
}
c := contextFromRune(r)
if got, want := c.info.isCaseIgnorable(), propIgnore(r); got != want {
t.Errorf("caseIgnorable(%U): got %v; want %v (%x)", r, got, want, c.info)
}
// New letters may change case types, but existing case pairings should
// not change. See Case Pair Stability in
// http://unicode.org/policies/stability_policy.html.
if rf := unicode.SimpleFold(r); rf != r && unicode.In(rf, assigned) {
if got, want := c.info.isCased(), propCased(r); got != want {
t.Errorf("cased(%U): got %v; want %v (%x)", r, got, want, c.info)
}
if got, want := c.caseType() == cUpper, propUpper(r); got != want {
t.Errorf("upper(%U): got %v; want %v (%x)", r, got, want, c.info)
}
if got, want := c.caseType() == cLower, propLower(r); got != want {
t.Errorf("lower(%U): got %v; want %v (%x)", r, got, want, c.info)
}
}
if got, want := c.info.isBreak(), hasBreakProp(r); got != want {
t.Errorf("isBreak(%U): got %v; want %v (%x)", r, got, want, c.info)
}
}
// TODO: get title case from unicode file.
}
func TestMapping(t *testing.T) {
assigned := rangetable.Assigned(UnicodeVersion)
coreVersion := rangetable.Assigned(unicode.Version)
if coreVersion == nil {
coreVersion = assigned
}
apply := func(r rune, f func(c *context) bool) string {
c := contextFromRune(r)
f(c)
return string(c.dst[:c.pDst])
}
for r, tt := range special {
if got, want := apply(r, lower), tt.toLower; got != want {
t.Errorf("lowerSpecial:(%U): got %+q; want %+q", r, got, want)
}
if got, want := apply(r, title), tt.toTitle; got != want {
t.Errorf("titleSpecial:(%U): got %+q; want %+q", r, got, want)
}
if got, want := apply(r, upper), tt.toUpper; got != want {
t.Errorf("upperSpecial:(%U): got %+q; want %+q", r, got, want)
}
}
for r := rune(0); r <= lastRuneForTesting; r++ {
if !unicode.In(r, assigned) || !unicode.In(r, coreVersion) {
continue
}
if rf := unicode.SimpleFold(r); rf == r || !unicode.In(rf, assigned) {
continue
}
if _, ok := special[r]; ok {
continue
}
want := string(unicode.ToLower(r))
if got := apply(r, lower); got != want {
t.Errorf("lower:%q (%U): got %q %U; want %q %U", r, r, got, []rune(got), want, []rune(want))
}
want = string(unicode.ToUpper(r))
if got := apply(r, upper); got != want {
t.Errorf("upper:%q (%U): got %q %U; want %q %U", r, r, got, []rune(got), want, []rune(want))
}
want = string(unicode.ToTitle(r))
if got := apply(r, title); got != want {
t.Errorf("title:%q (%U): got %q %U; want %q %U", r, r, got, []rune(got), want, []rune(want))
}
}
}
func runeFoldData(r rune) (x struct{ simple, full, special string }) {
x = foldMap[r]
if x.simple == "" {
x.simple = string(unicode.ToLower(r))
}
if x.full == "" {
x.full = string(unicode.ToLower(r))
}
if x.special == "" {
x.special = x.full
}
return
}
func TestFoldData(t *testing.T) {
assigned := rangetable.Assigned(UnicodeVersion)
coreVersion := rangetable.Assigned(unicode.Version)
if coreVersion == nil {
coreVersion = assigned
}
apply := func(r rune, f func(c *context) bool) (string, info) {
c := contextFromRune(r)
f(c)
return string(c.dst[:c.pDst]), c.info.cccType()
}
for r := rune(0); r <= lastRuneForTesting; r++ {
if !unicode.In(r, assigned) || !unicode.In(r, coreVersion) {
continue
}
x := runeFoldData(r)
if got, info := apply(r, foldFull); got != x.full {
t.Errorf("full:%q (%U): got %q %U; want %q %U (ccc=%x)", r, r, got, []rune(got), x.full, []rune(x.full), info)
}
// TODO: special and simple.
}
}
func TestCCC(t *testing.T) {
assigned := rangetable.Assigned(UnicodeVersion)
normVersion := rangetable.Assigned(norm.Version)
for r := rune(0); r <= lastRuneForTesting; r++ {
if !unicode.In(r, assigned) || !unicode.In(r, normVersion) {
continue
}
c := contextFromRune(r)
p := norm.NFC.PropertiesString(string(r))
want := cccOther
switch p.CCC() {
case 0:
want = cccZero
case above:
want = cccAbove
}
if got := c.info.cccType(); got != want {
t.Errorf("%U: got %x; want %x", r, got, want)
}
}
}
func TestWordBreaks(t *testing.T) {
for _, tt := range breakTest {
testtext.Run(t, tt, func(t *testing.T) {
parts := strings.Split(tt, "|")
want := ""
for _, s := range parts {
found := false
// This algorithm implements title casing given word breaks
// as defined in the Unicode standard 3.13 R3.
for _, r := range s {
title := unicode.ToTitle(r)
lower := unicode.ToLower(r)
if !found && title != lower {
found = true
want += string(title)
} else {
want += string(lower)
}
}
}
src := strings.Join(parts, "")
got := Title(language.Und).String(src)
if got != want {
t.Errorf("got %q; want %q", got, want)
}
})
}
}
func TestContext(t *testing.T) {
tests := []struct {
desc string
dstSize int
atEOF bool
src string
out string
nSrc int
err error
ops string
prefixArg string
prefixWant bool
}{{
desc: "next: past end, atEOF, no checkpoint",
dstSize: 10,
atEOF: true,
src: "12",
out: "",
nSrc: 2,
ops: "next;next;next",
// Test that calling prefix with a non-empty argument when the buffer
// is depleted returns false.
prefixArg: "x",
prefixWant: false,
}, {
desc: "next: not at end, atEOF, no checkpoint",
dstSize: 10,
atEOF: false,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortSrc,
ops: "next;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "next: past end, !atEOF, no checkpoint",
dstSize: 10,
atEOF: false,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortSrc,
ops: "next;next;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "next: past end, !atEOF, checkpoint",
dstSize: 10,
atEOF: false,
src: "12",
out: "",
nSrc: 2,
ops: "next;next;checkpoint;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "copy: exact count, atEOF, no checkpoint",
dstSize: 2,
atEOF: true,
src: "12",
out: "12",
nSrc: 2,
ops: "next;copy;next;copy;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "copy: past end, !atEOF, no checkpoint",
dstSize: 2,
atEOF: false,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortSrc,
ops: "next;copy;next;copy;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "copy: past end, !atEOF, checkpoint",
dstSize: 2,
atEOF: false,
src: "12",
out: "12",
nSrc: 2,
ops: "next;copy;next;copy;checkpoint;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "copy: short dst",
dstSize: 1,
atEOF: false,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortDst,
ops: "next;copy;next;copy;checkpoint;next",
prefixArg: "12",
prefixWant: false,
}, {
desc: "copy: short dst, checkpointed",
dstSize: 1,
atEOF: false,
src: "12",
out: "1",
nSrc: 1,
err: transform.ErrShortDst,
ops: "next;copy;checkpoint;next;copy;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "writeString: simple",
dstSize: 3,
atEOF: true,
src: "1",
out: "1ab",
nSrc: 1,
ops: "next;copy;writeab;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "writeString: short dst",
dstSize: 2,
atEOF: true,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortDst,
ops: "next;copy;writeab;next",
prefixArg: "2",
prefixWant: true,
}, {
desc: "writeString: simple",
dstSize: 3,
atEOF: true,
src: "12",
out: "1ab",
nSrc: 2,
ops: "next;copy;next;writeab;next",
prefixArg: "",
prefixWant: true,
}, {
desc: "writeString: short dst",
dstSize: 2,
atEOF: true,
src: "12",
out: "",
nSrc: 0,
err: transform.ErrShortDst,
ops: "next;copy;next;writeab;next",
prefixArg: "1",
prefixWant: false,
}, {
desc: "prefix",
dstSize: 2,
atEOF: true,
src: "12",
out: "",
nSrc: 0,
// Context will assign an ErrShortSrc if the input wasn't exhausted.
err: transform.ErrShortSrc,
prefixArg: "12",
prefixWant: true,
}}
for _, tt := range tests {
c := context{dst: make([]byte, tt.dstSize), src: []byte(tt.src), atEOF: tt.atEOF}
for _, op := range strings.Split(tt.ops, ";") {
switch op {
case "next":
c.next()
case "checkpoint":
c.checkpoint()
case "writeab":
c.writeString("ab")
case "copy":
c.copy()
case "":
default:
t.Fatalf("unknown op %q", op)
}
}
if got := c.hasPrefix(tt.prefixArg); got != tt.prefixWant {
t.Errorf("%s:\nprefix was %v; want %v", tt.desc, got, tt.prefixWant)
}
nDst, nSrc, err := c.ret()
if err != tt.err {
t.Errorf("%s:\nerror was %v; want %v", tt.desc, err, tt.err)
}
if out := string(c.dst[:nDst]); out != tt.out {
t.Errorf("%s:\nout was %q; want %q", tt.desc, out, tt.out)
}
if nSrc != tt.nSrc {
t.Errorf("%s:\nnSrc was %d; want %d", tt.desc, nSrc, tt.nSrc)
}
}
}

53
vendor/golang.org/x/text/cases/example_test.go generated vendored Normal file

@@ -0,0 +1,53 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases_test
import (
"fmt"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
func Example() {
src := []string{
"hello world!",
"i with dot",
"'n ijsberg",
"here comes O'Brian",
}
for _, c := range []cases.Caser{
cases.Lower(language.Und),
cases.Upper(language.Turkish),
cases.Title(language.Dutch),
cases.Title(language.Und, cases.NoLower),
} {
fmt.Println()
for _, s := range src {
fmt.Println(c.String(s))
}
}
// Output:
// hello world!
// i with dot
// 'n ijsberg
// here comes o'brian
//
// HELLO WORLD!
// İ WİTH DOT
// 'N İJSBERG
// HERE COMES O'BRİAN
//
// Hello World!
// I With Dot
// 'n IJsberg
// Here Comes O'brian
//
// Hello World!
// I With Dot
// 'N Ijsberg
// Here Comes O'Brian
}

34
vendor/golang.org/x/text/cases/fold.go generated vendored Normal file

@@ -0,0 +1,34 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
import "golang.org/x/text/transform"
type caseFolder struct{ transform.NopResetter }
// caseFolder implements the Transformer interface for doing case folding.
func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() {
foldFull(&c)
c.checkpoint()
}
return c.ret()
}
func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isFoldFull(&c) {
c.checkpoint()
}
return c.retSpan()
}
func makeFold(o options) transform.SpanningTransformer {
// TODO: Special case folding, through option Language, Special/Turkic, or
// both.
// TODO: Implement Compact options.
return &caseFolder{}
}

51
vendor/golang.org/x/text/cases/fold_test.go generated vendored Normal file

@@ -0,0 +1,51 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
import (
"testing"
"golang.org/x/text/internal/testtext"
)
var foldTestCases = []string{
"βß\u13f8", // "βssᏰ"
"ab\u13fc\uab7aꭰ", // ab
"affifflast", // affifflast
"Iİiı\u0345", // ii̇iıι
"µµΜΜςσΣΣ", // μμμμσσσσ
}
func TestFold(t *testing.T) {
for _, tc := range foldTestCases {
testEntry := func(name string, c Caser, m func(r rune) string) {
want := ""
for _, r := range tc {
want += m(r)
}
if got := c.String(tc); got != want {
t.Errorf("%s(%s) = %+q; want %+q", name, tc, got, want)
}
dst := make([]byte, 256) // big enough to hold any result
src := []byte(tc)
v := testtext.AllocsPerRun(20, func() {
c.Transform(dst, src, true)
})
if v > 0 {
t.Errorf("%s(%s): number of allocs was %f; want 0", name, tc, v)
}
}
testEntry("FullFold", Fold(), func(r rune) string {
return runeFoldData(r).full
})
// TODO:
// testEntry("SimpleFold", Fold(Compact), func(r rune) string {
// return runeFoldData(r).simple
// })
// testEntry("SpecialFold", Fold(Turkic), func(r rune) string {
// return runeFoldData(r).special
// })
}
}

839
vendor/golang.org/x/text/cases/gen.go generated vendored Normal file

@@ -0,0 +1,839 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This program generates the trie for casing operations. The Unicode casing
// algorithm requires the lookup of various properties and mappings for each
// rune. The table generated by this generator combines several of the most
// frequently used of these into a single trie so that they can be accessed
// with a single lookup.
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"reflect"
"strconv"
"strings"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/norm"
)
func main() {
gen.Init()
genTables()
genTablesTest()
gen.Repackage("gen_trieval.go", "trieval.go", "cases")
}
// runeInfo contains all information for a rune that we care about for casing
// operations.
type runeInfo struct {
Rune rune
entry info // trie value for this rune.
CaseMode info
// Simple case mappings.
Simple [1 + maxCaseMode][]rune
// Special casing
HasSpecial bool
Conditional bool
Special [1 + maxCaseMode][]rune
// Folding
FoldSimple rune
FoldSpecial rune
FoldFull []rune
// TODO: FC_NFKC, or equivalent data.
// Properties
SoftDotted bool
CaseIgnorable bool
Cased bool
DecomposeGreek bool
BreakType string
BreakCat breakCategory
// We care mostly about 0, Above, and IotaSubscript.
CCC byte
}
type breakCategory int
const (
breakBreak breakCategory = iota
breakLetter
breakMid
)
// mapping returns the case mapping for the given case type.
func (r *runeInfo) mapping(c info) string {
if r.HasSpecial {
return string(r.Special[c])
}
if len(r.Simple[c]) != 0 {
return string(r.Simple[c])
}
return string(r.Rune)
}
func parse(file string, f func(p *ucd.Parser)) {
ucd.Parse(gen.OpenUCDFile(file), f)
}
func parseUCD() []runeInfo {
chars := make([]runeInfo, unicode.MaxRune)
get := func(r rune) *runeInfo {
c := &chars[r]
c.Rune = r
return c
}
parse("UnicodeData.txt", func(p *ucd.Parser) {
ri := get(p.Rune(0))
ri.CCC = byte(p.Int(ucd.CanonicalCombiningClass))
ri.Simple[cLower] = p.Runes(ucd.SimpleLowercaseMapping)
ri.Simple[cUpper] = p.Runes(ucd.SimpleUppercaseMapping)
ri.Simple[cTitle] = p.Runes(ucd.SimpleTitlecaseMapping)
if p.String(ucd.GeneralCategory) == "Lt" {
ri.CaseMode = cTitle
}
})
// <code>; <property>
parse("PropList.txt", func(p *ucd.Parser) {
if p.String(1) == "Soft_Dotted" {
chars[p.Rune(0)].SoftDotted = true
}
})
// <code>; <property>
parse("DerivedCoreProperties.txt", func(p *ucd.Parser) {
ri := get(p.Rune(0))
switch p.String(1) {
case "Case_Ignorable":
ri.CaseIgnorable = true
case "Cased":
ri.Cased = true
case "Lowercase":
ri.CaseMode = cLower
case "Uppercase":
ri.CaseMode = cUpper
}
})
// <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)?
parse("SpecialCasing.txt", func(p *ucd.Parser) {
// We drop all conditional special casing and deal with them manually in
// the language-specific case mappers. Rune 0x03A3 is the only one with
// a conditional formatting that is not language-specific. However,
// dealing with this letter is tricky, especially in a streaming
// context, so we deal with it in the Caser for Greek specifically.
ri := get(p.Rune(0))
if p.String(4) == "" {
ri.HasSpecial = true
ri.Special[cLower] = p.Runes(1)
ri.Special[cTitle] = p.Runes(2)
ri.Special[cUpper] = p.Runes(3)
} else {
ri.Conditional = true
}
})
// TODO: Use text breaking according to UAX #29.
// <code>; <word break type>
parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) {
ri := get(p.Rune(0))
ri.BreakType = p.String(1)
// We collapse the word breaking properties onto the categories we need.
switch p.String(1) { // TODO: officially we need to canonicalize.
case "MidLetter", "MidNumLet", "Single_Quote":
ri.BreakCat = breakMid
if !ri.CaseIgnorable {
// finalSigma relies on the fact that all breakMid runes are
// also a Case_Ignorable. Revisit this code when this changes.
log.Fatalf("Rune %U, which has a break category mid, is not a case ignorable", ri)
}
case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet", "Format", "ZWJ":
ri.BreakCat = breakLetter
}
})
// <code>; <type>; <mapping>
parse("CaseFolding.txt", func(p *ucd.Parser) {
ri := get(p.Rune(0))
switch p.String(1) {
case "C":
ri.FoldSimple = p.Rune(2)
ri.FoldFull = p.Runes(2)
case "S":
ri.FoldSimple = p.Rune(2)
case "T":
ri.FoldSpecial = p.Rune(2)
case "F":
ri.FoldFull = p.Runes(2)
default:
log.Fatalf("%U: unknown type: %s", p.Rune(0), p.String(1))
}
})
return chars
}
func genTables() {
chars := parseUCD()
verifyProperties(chars)
t := triegen.NewTrie("case")
for i := range chars {
c := &chars[i]
makeEntry(c)
t.Insert(rune(i), uint64(c.entry))
}
w := gen.NewCodeWriter()
defer w.WriteVersionedGoFile("tables.go", "cases")
gen.WriteUnicodeVersion(w)
// TODO: write CLDR version after adding a mechanism to detect that the
// tables on which the manually created locale-sensitive casing code is
// based haven't changed.
w.WriteVar("xorData", string(xorData))
w.WriteVar("exceptions", string(exceptionData))
sz, err := t.Gen(w, triegen.Compact(&sparseCompacter{}))
if err != nil {
log.Fatal(err)
}
w.Size += sz
}
func makeEntry(ri *runeInfo) {
if ri.CaseIgnorable {
if ri.Cased {
ri.entry = cIgnorableCased
} else {
ri.entry = cIgnorableUncased
}
} else {
ri.entry = ri.CaseMode
}
// TODO: handle soft-dotted.
ccc := cccOther
switch ri.CCC {
case 0: // Not_Reordered
ccc = cccZero
case above: // Above
ccc = cccAbove
}
switch ri.BreakCat {
case breakBreak:
ccc = cccBreak
case breakMid:
ri.entry |= isMidBit
}
ri.entry |= ccc
if ri.CaseMode == cUncased {
return
}
// Need to do something special.
if ri.CaseMode == cTitle || ri.HasSpecial || ri.mapping(cTitle) != ri.mapping(cUpper) {
makeException(ri)
return
}
if f := string(ri.FoldFull); len(f) > 0 && f != ri.mapping(cUpper) && f != ri.mapping(cLower) {
makeException(ri)
return
}
// Rune is either lowercase or uppercase.
orig := string(ri.Rune)
mapped := ""
if ri.CaseMode == cUpper {
mapped = ri.mapping(cLower)
} else {
mapped = ri.mapping(cUpper)
}
if len(orig) != len(mapped) {
makeException(ri)
return
}
if string(ri.FoldFull) == ri.mapping(cUpper) {
ri.entry |= inverseFoldBit
}
n := len(orig)
// Create per-byte XOR mask.
var b []byte
for i := 0; i < n; i++ {
b = append(b, orig[i]^mapped[i])
}
// Remove leading 0 bytes, but keep at least one byte.
for ; len(b) > 1 && b[0] == 0; b = b[1:] {
}
if len(b) == 1 && b[0]&0xc0 == 0 {
ri.entry |= info(b[0]) << xorShift
return
}
key := string(b)
x, ok := xorCache[key]
if !ok {
xorData = append(xorData, 0) // for detecting start of sequence
xorData = append(xorData, b...)
x = len(xorData) - 1
xorCache[key] = x
}
ri.entry |= info(x<<xorShift) | xorIndexBit
}
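// Worked example (editorial note, not part of the generated source): for
// U+0100 'Ā' (UTF-8 C4 80) with lowercase mapping U+0101 'ā' (C4 81), the
// per-byte XOR mask is {0x00, 0x01}. The leading zero byte is trimmed and the
// remaining byte 0x01 has its top two bits clear, so it is stored directly in
// the 6-bit XOR field of the trie value; no xorData entry is needed.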
var xorCache = map[string]int{}
// xorData contains byte-wise XOR data for the least significant bytes of a
// UTF-8 encoded rune. An index points to the last byte. The sequence starts
// with a zero terminator.
var xorData = []byte{}
// See the comments in gen_trieval.go re "the exceptions slice".
var exceptionData = []byte{0}
// makeException encodes case mappings that cannot be expressed in a simple
// XOR diff.
func makeException(ri *runeInfo) {
ccc := ri.entry & cccMask
// Set exception bit and retain case type.
ri.entry &= 0x0007
ri.entry |= exceptionBit
if len(exceptionData) >= 1<<numExceptionBits {
log.Fatalf("%U:exceptionData too large %x > %d bits", ri.Rune, len(exceptionData), numExceptionBits)
}
// Set the offset in the exceptionData array.
ri.entry |= info(len(exceptionData) << exceptionShift)
orig := string(ri.Rune)
tc := ri.mapping(cTitle)
uc := ri.mapping(cUpper)
lc := ri.mapping(cLower)
ff := string(ri.FoldFull)
// addString records the length of s in the header byte *b and, if s differs from the original rune, appends s to exceptionData.
addString := func(s string, b *byte) {
if len(s) == 0 {
// Zero-length mappings exist, but only for conditional casing,
// which we are representing outside of this table.
log.Fatalf("%U: has zero-length mapping.", ri.Rune)
}
*b <<= 3
if s != orig {
n := len(s)
if n > 7 {
log.Fatalf("%U: mapping larger than 7 (%d)", ri.Rune, n)
}
*b |= byte(n)
exceptionData = append(exceptionData, s...)
}
}
// byte 0:
exceptionData = append(exceptionData, byte(ccc)|byte(len(ff)))
// byte 1:
p := len(exceptionData)
exceptionData = append(exceptionData, 0)
if len(ff) > 7 { // May be zero-length.
log.Fatalf("%U: fold string larger than 7 (%d)", ri.Rune, len(ff))
}
exceptionData = append(exceptionData, ff...)
ct := ri.CaseMode
if ct != cLower {
addString(lc, &exceptionData[p])
}
if ct != cUpper {
addString(uc, &exceptionData[p])
}
if ct != cTitle {
// If title is the same as upper, we set it to the original string so
// that it will be marked as not present. This implies title case is
// the same as upper case.
if tc == uc {
tc = orig
}
addString(tc, &exceptionData[p])
}
}
// sparseCompacter is a trie value block Compacter. There are many cases where
// successive runes alternate between lower- and upper-case. This Compacter
// exploits this by adding a special case type where the case value is obtained
// from or-ing it with the least-significant bit of the rune, creating large
// ranges of equal case values that compress well.
type sparseCompacter struct {
sparseBlocks [][]uint16
sparseOffsets []uint16
sparseCount int
}
// makeSparse returns the number of elements that the compact block would contain
// as well as the modified values.
func makeSparse(vals []uint64) ([]uint16, int) {
// Copy the values.
values := make([]uint16, len(vals))
for i, v := range vals {
values[i] = uint16(v)
}
alt := func(i int, v uint16) uint16 {
if cm := info(v & fullCasedMask); cm == cUpper || cm == cLower {
// Convert cLower or cUpper to cXORCase value, which has the form 11x.
xor := v
xor &^= 1
xor |= uint16(i&1) ^ (v & 1)
xor |= 0x4
return xor
}
return v
}
var count int
var previous uint16
for i, v := range values {
if v != 0 {
// Try if the unmodified value is equal to the previous.
if v == previous {
continue
}
// Try if the xor-ed value is equal to the previous value.
a := alt(i, v)
if a == previous {
values[i] = a
continue
}
// This is a new value.
count++
// Use the xor-ed value if it will be identical to the next value.
if p := i + 1; p < len(values) && alt(p, values[p]) == a {
values[i] = a
v = a
}
}
previous = v
}
return values, count
}
func (s *sparseCompacter) Size(v []uint64) (int, bool) {
_, n := makeSparse(v)
// We limit using this method to having 16 entries.
if n > 16 {
return 0, false
}
return 2 + int(reflect.TypeOf(valueRange{}).Size())*n, true
}
func (s *sparseCompacter) Store(v []uint64) uint32 {
h := uint32(len(s.sparseOffsets))
values, sz := makeSparse(v)
s.sparseBlocks = append(s.sparseBlocks, values)
s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount))
s.sparseCount += sz
return h
}
func (s *sparseCompacter) Handler() string {
// The sparse global variable and its lookup method are defined in gen_trieval.go.
return "sparse.lookup"
}
func (s *sparseCompacter) Print(w io.Writer) (retErr error) {
p := func(format string, args ...interface{}) {
_, err := fmt.Fprintf(w, format, args...)
if retErr == nil && err != nil {
retErr = err
}
}
ls := len(s.sparseBlocks)
if ls == len(s.sparseOffsets) {
s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount))
}
p("// sparseOffsets: %d entries, %d bytes\n", ls+1, (ls+1)*2)
p("var sparseOffsets = %#v\n\n", s.sparseOffsets)
ns := s.sparseCount
p("// sparseValues: %d entries, %d bytes\n", ns, ns*4)
p("var sparseValues = [%d]valueRange {", ns)
for i, values := range s.sparseBlocks {
p("\n// Block %#x, offset %#x", i, s.sparseOffsets[i])
var v uint16
for i, nv := range values {
if nv != v {
if v != 0 {
p(",hi:%#02x},", 0x80+i-1)
}
if nv != 0 {
p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
}
}
v = nv
}
if v != 0 {
p(",hi:%#02x},", 0x80+len(values)-1)
}
}
p("\n}\n\n")
return
}
// verifyProperties verifies the properties of runes that the implementation
// relies upon. Each property is marked with an identifier that is referred
// to in the places where it is used.
func verifyProperties(chars []runeInfo) {
for i, c := range chars {
r := rune(i)
// Rune properties.
// A.1: modifier never changes on lowercase. [ltLower]
if c.CCC > 0 && unicode.ToLower(r) != r {
log.Fatalf("%U: non-starter changes when lowercased", r)
}
// A.2: properties of decompositions starting with I or J. [ltLower]
d := norm.NFD.PropertiesString(string(r)).Decomposition()
if len(d) > 0 {
if d[0] == 'I' || d[0] == 'J' {
// A.2.1: we expect at least an ASCII character and a modifier.
if len(d) < 3 {
log.Fatalf("%U: length of decomposition was %d; want >= 3", r, len(d))
}
// All subsequent runes are modifiers and all have the same CCC.
runes := []rune(string(d[1:]))
ccc := chars[runes[0]].CCC
for _, mr := range runes[1:] {
mc := chars[mr]
// A.2.2: all modifiers have a CCC of Above or less.
if ccc == 0 || ccc > above {
log.Fatalf("%U: CCC of successive rune (%U) was %d; want (0,230]", r, mr, ccc)
}
// A.2.3: a sequence of modifiers all have the same CCC.
if mc.CCC != ccc {
log.Fatalf("%U: CCC of follow-up modifier (%U) was %d; want %d", r, mr, mc.CCC, ccc)
}
// A.2.4: for each trailing r, r in [0x300, 0x311] <=> CCC == Above.
if (ccc == above) != (0x300 <= mr && mr <= 0x311) {
log.Fatalf("%U: modifier %U in [U+0300, U+0311] != ccc(%U) == 230", r, mr, mr)
}
if i += len(string(mr)); i >= len(d) {
break
}
}
}
}
// A.3: no U+0307 in decomposition of Soft-Dotted rune. [ltUpper]
if unicode.Is(unicode.Soft_Dotted, r) && strings.Contains(string(d), "\u0307") {
log.Fatalf("%U: decomposition of soft-dotted rune may not contain U+0307", r)
}
// A.4: only rune U+0345 may be of CCC Iota_Subscript. [elUpper]
if c.CCC == iotaSubscript && r != 0x0345 {
log.Fatalf("%U: only rune U+0345 may have CCC Iota_Subscript", r)
}
// A.5: soft-dotted runes do not have exceptions.
if c.SoftDotted && c.entry&exceptionBit != 0 {
log.Fatalf("%U: soft-dotted has exception", r)
}
// A.6: Greek decomposition. [elUpper]
if unicode.Is(unicode.Greek, r) {
if b := norm.NFD.PropertiesString(string(r)).Decomposition(); b != nil {
runes := []rune(string(b))
// A.6.1: If a Greek rune decomposes and the first rune of the
// decomposition is greater than U+00FF, that rune is always a Greek
// letter and not a modifier.
if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) {
log.Fatalf("%U: expected first rune of Greek decomposition to be letter, found %U", r, f)
}
// A.6.2: Any follow-up rune in a Greek decomposition is a
// modifier of which the first should be gobbled in
// decomposition.
for _, m := range runes[1:] {
switch m {
case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345:
default:
log.Fatalf("%U: modifier %U is outside of expected Greek modifier set", r, m)
}
}
}
}
// Breaking properties.
// B.1: all runes with CCC > 0 are of break type Extend.
if c.CCC > 0 && c.BreakType != "Extend" {
log.Fatalf("%U: CCC == %d, but got break type %s; want Extend", r, c.CCC, c.BreakType)
}
// B.2: all cased runes with c.CCC == 0 are of break type ALetter.
if c.CCC == 0 && c.Cased && c.BreakType != "ALetter" {
log.Fatalf("%U: cased, but got break type %s; want ALetter", r, c.BreakType)
}
// B.3: letter category.
if c.CCC == 0 && c.BreakCat != breakBreak && !c.CaseIgnorable {
if c.BreakCat != breakLetter {
log.Fatalf("%U: check for letter break type gave %d; want %d", r, c.BreakCat, breakLetter)
}
}
}
}
func genTablesTest() {
w := &bytes.Buffer{}
fmt.Fprintln(w, "var (")
printProperties(w, "DerivedCoreProperties.txt", "Case_Ignorable", verifyIgnore)
// We discard the output as we know we have perfect functions. We run them
// just to verify the properties are correct.
n := printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Cased", verifyCased)
n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Lowercase", verifyLower)
n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Uppercase", verifyUpper)
if n > 0 {
log.Fatalf("One of the discarded properties does not have a perfect filter.")
}
// <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)?
fmt.Fprintln(w, "\tspecial = map[rune]struct{ toLower, toTitle, toUpper string }{")
parse("SpecialCasing.txt", func(p *ucd.Parser) {
// Skip conditional entries.
if p.String(4) != "" {
return
}
r := p.Rune(0)
fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n",
r, string(p.Runes(1)), string(p.Runes(2)), string(p.Runes(3)))
})
fmt.Fprint(w, "\t}\n\n")
// <code>; <type>; <runes>
table := map[rune]struct{ simple, full, special string }{}
parse("CaseFolding.txt", func(p *ucd.Parser) {
r := p.Rune(0)
t := p.String(1)
v := string(p.Runes(2))
if t != "T" && v == string(unicode.ToLower(r)) {
return
}
x := table[r]
switch t {
case "C":
x.full = v
x.simple = v
case "S":
x.simple = v
case "F":
x.full = v
case "T":
x.special = v
}
table[r] = x
})
fmt.Fprintln(w, "\tfoldMap = map[rune]struct{ simple, full, special string }{")
for r := rune(0); r < 0x10FFFF; r++ {
x, ok := table[r]
if !ok {
continue
}
fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", r, x.simple, x.full, x.special)
}
fmt.Fprint(w, "\t}\n\n")
// Break property
notBreak := map[rune]bool{}
parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) {
switch p.String(1) {
case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote",
"ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet", "ZWJ":
notBreak[p.Rune(0)] = true
}
})
fmt.Fprintln(w, "\tbreakProp = []struct{ lo, hi rune }{")
inBreak := false
for r := rune(0); r <= lastRuneForTesting; r++ {
if isBreak := !notBreak[r]; isBreak != inBreak {
if isBreak {
fmt.Fprintf(w, "\t\t{0x%x, ", r)
} else {
fmt.Fprintf(w, "0x%x},\n", r-1)
}
inBreak = isBreak
}
}
if inBreak {
fmt.Fprintf(w, "0x%x},\n", lastRuneForTesting)
}
fmt.Fprint(w, "\t}\n\n")
// Word break test
// Filter out all samples that do not contain cased characters.
cased := map[rune]bool{}
parse("DerivedCoreProperties.txt", func(p *ucd.Parser) {
if p.String(1) == "Cased" {
cased[p.Rune(0)] = true
}
})
fmt.Fprintln(w, "\tbreakTest = []string{")
parse("auxiliary/WordBreakTest.txt", func(p *ucd.Parser) {
c := strings.Split(p.String(0), " ")
const sep = '|'
numCased := 0
test := ""
for ; len(c) >= 2; c = c[2:] {
if c[0] == "÷" && test != "" {
test += string(sep)
}
i, err := strconv.ParseUint(c[1], 16, 32)
r := rune(i)
if err != nil {
log.Fatalf("Invalid rune %q.", c[1])
}
if r == sep {
log.Fatalf("Separator %q not allowed in test data. Pick another one.", sep)
}
if cased[r] {
numCased++
}
test += string(r)
}
if numCased > 1 {
fmt.Fprintf(w, "\t\t%q,\n", test)
}
})
fmt.Fprintln(w, "\t}")
fmt.Fprintln(w, ")")
gen.WriteVersionedGoFile("tables_test.go", "cases", w.Bytes())
}
// These functions are just used for verification that their definitions have not
// changed in the Unicode Standard.
func verifyCased(r rune) bool {
return verifyLower(r) || verifyUpper(r) || unicode.IsTitle(r)
}
func verifyLower(r rune) bool {
return unicode.IsLower(r) || unicode.Is(unicode.Other_Lowercase, r)
}
func verifyUpper(r rune) bool {
return unicode.IsUpper(r) || unicode.Is(unicode.Other_Uppercase, r)
}
// verifyIgnore is an approximation of the Case_Ignorable property using the
// core unicode package. It is used to reduce the size of the test data.
func verifyIgnore(r rune) bool {
props := []*unicode.RangeTable{
unicode.Mn,
unicode.Me,
unicode.Cf,
unicode.Lm,
unicode.Sk,
}
for _, p := range props {
if unicode.Is(p, r) {
return true
}
}
return false
}
// printProperties prints tables of rune properties from the given UCD file.
// A filter func f can be given to exclude certain values. A rune r will have
// the indicated property if it is in the generated table or if f(r).
func printProperties(w io.Writer, file, property string, f func(r rune) bool) int {
verify := map[rune]bool{}
n := 0
varNameParts := strings.Split(property, "_")
varNameParts[0] = strings.ToLower(varNameParts[0])
fmt.Fprintf(w, "\t%s = map[rune]bool{\n", strings.Join(varNameParts, ""))
parse(file, func(p *ucd.Parser) {
if p.String(1) == property {
r := p.Rune(0)
verify[r] = true
if !f(r) {
n++
fmt.Fprintf(w, "\t\t0x%.4x: true,\n", r)
}
}
})
fmt.Fprint(w, "\t}\n\n")
// Verify that f is correct, that is, it represents a subset of the property.
for r := rune(0); r <= lastRuneForTesting; r++ {
if !verify[r] && f(r) {
log.Fatalf("Incorrect filter func for property %q.", property)
}
}
return n
}
// The newCaseTrie, sparseValues and sparseOffsets definitions below are
// placeholders referred to by gen_trieval.go. The real definitions are
// generated by this program and written to tables.go.
func newCaseTrie(int) int { return 0 }
var (
sparseValues [0]valueRange
sparseOffsets [0]uint16
)

219
vendor/golang.org/x/text/cases/gen_trieval.go generated vendored Normal file

@@ -0,0 +1,219 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This file contains definitions for interpreting the trie value of the case
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.
// info holds case information for a single rune. It is the value returned
// by a trie lookup. Most mapping information can be stored in a single 16-bit
// value. If not, for example when a rune is mapped to multiple runes, the value
// stores some basic case data and an index into an array with additional data.
//
// The per-rune values have the following format:
//
// if (exception) {
// 15..5 unsigned exception index
// 4 unused
// } else {
// 15..8 XOR pattern or index to XOR pattern for case mapping
// Only 13..8 are used for XOR patterns.
// 7 inverseFold (fold to upper, not to lower)
// 6 index: interpret the XOR pattern as an index
// or isMid if case mode is cIgnorableUncased.
// 5..4 CCC: zero (normal or break), above or other
// }
// 3 exception: interpret this value as an exception index
// (TODO: is this bit necessary? Probably implied from case mode.)
// 2..0 case mode
//
// For the non-exceptional cases, a rune must be either uncased, lowercase or
// uppercase. If the rune is cased, the XOR pattern maps either a lowercase
// rune to uppercase or an uppercase rune to lowercase (applied to the 10
// least-significant bits of the rune).
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
const (
casedMask = 0x0003
fullCasedMask = 0x0007
ignorableMask = 0x0006
ignorableValue = 0x0004
inverseFoldBit = 1 << 7
isMidBit = 1 << 6
exceptionBit = 1 << 3
exceptionShift = 5
numExceptionBits = 11
xorIndexBit = 1 << 6
xorShift = 8
// There is no mapping if all xor bits and the exception bit are zero.
hasMappingMask = 0xff80 | exceptionBit
)
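// Editorial sketch (not part of the original file): unpacking a non-exception
// trie value using the constants above. The real consumers of these bits are
// the functions in context.go.
func unpackInfo(v info) (caseMode info, isException bool, xorPattern info, xorIsIndex bool) {
	caseMode = v & fullCasedMask
	isException = v&exceptionBit != 0
	if !isException {
		xorPattern = v >> xorShift      // bits 15..8 (only 13..8 carry XOR patterns)
		xorIsIndex = v&xorIndexBit != 0 // interpret the pattern as an index into xorData
	}
	return caseMode, isException, xorPattern, xorIsIndex
}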
// The case mode bits encode the case type of a rune. This includes uncased,
// title, upper and lower case and case ignorable. (For a definition of these
// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare
// cases, a rune can be both cased and case-ignorable. This is encoded by
// cIgnorableCased. A rune of this type is always lower case. Some runes are
// cased while not having a mapping.
//
// A common pattern for scripts in the Unicode standard is for upper and lower
// case runes to alternate for increasing rune values (e.g. the accented Latin
// ranges starting from U+0100 and U+1E00 among others and some Cyrillic
// characters). We use this property by defining a cXORCase mode, where the case
// mode (always upper or lower case) is derived from the rune value. As the XOR
// pattern for case mappings is often identical for successive runes, using
// cXORCase can result in large series of identical trie values. This, in turn,
// allows us to better compress the trie blocks.
const (
cUncased info = iota // 000
cTitle // 001
cLower // 010
cUpper // 011
cIgnorableUncased // 100
cIgnorableCased // 101 // lower case if mappings exist
cXORCase // 11x // case is cLower | ((rune&1) ^ x)
maxCaseMode = cUpper
)
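// Editorial illustration (not part of the original file): in the accented
// Latin range starting at U+0100, 'Ā' (upper) and 'ā' (lower) alternate for
// increasing rune values. With cXORCase the trie can store the same 11x value
// for the whole run, recovering cLower or cUpper from the least-significant
// bit of the rune, so such blocks compress to a single value range.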
func (c info) isCased() bool {
return c&casedMask != 0
}
func (c info) isCaseIgnorable() bool {
return c&ignorableMask == ignorableValue
}
func (c info) isNotCasedAndNotCaseIgnorable() bool {
return c&fullCasedMask == 0
}
func (c info) isCaseIgnorableAndNotCased() bool {
return c&fullCasedMask == cIgnorableUncased
}
func (c info) isMid() bool {
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
}
// The case mapping implementation will need to know about various Canonical
// Combining Class (CCC) values. We encode two of these in the trie value:
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that
// the rune also has the break category Break (see below).
const (
cccBreak info = iota << 4
cccZero
cccAbove
cccOther
cccMask = cccBreak | cccZero | cccAbove | cccOther
)
const (
starter = 0
above = 230
iotaSubscript = 240
)
// The exceptions slice holds data that does not fit in a normal info entry.
// The entry is pointed to by the exception index in an entry. It has the
// following format:
//
// Header
// byte 0:
// 7..6 unused
// 5..4 CCC type (same bits as entry)
// 3 unused
// 2..0 length of fold
//
// byte 1:
// 7..6 unused
// 5..3 length of 1st mapping of case type
// 2..0 length of 2nd mapping of case type
//
// case 1st 2nd
// lower -> upper, title
// upper -> lower, title
// title -> lower, upper
//
// Lengths with the value 0x7 indicate no value and imply no change.
// A length of 0 indicates a mapping to a zero-length string.
//
// Body bytes:
// case folding bytes
// lowercase mapping bytes
// uppercase mapping bytes
// titlecase mapping bytes
// closure mapping bytes (for NFKC_Casefold). (TODO)
//
// Fallbacks:
// missing fold -> lower
// missing title -> upper
// all missing -> original rune
//
// exceptions starts with a dummy byte to enforce that there is no zero index
// value.
const (
lengthMask = 0x07
lengthBits = 3
noChange = 0
)
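// Editorial sketch (not part of the original file): reading the two header
// bytes of an exception record as documented above. The real readers are
// lower, upper, title and foldFull in context.go.
func exceptionLengths(e string) (nFold, nFirst, nSecond byte) {
	nFold = e[0] & lengthMask                  // byte 0, bits 2..0: length of the fold string
	nFirst = (e[1] >> lengthBits) & lengthMask // byte 1, bits 5..3: length of the 1st mapping
	nSecond = e[1] & lengthMask                // byte 1, bits 2..0: length of the 2nd mapping
	return nFold, nFirst, nSecond
}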
// References to generated trie.
var trie = newCaseTrie(0)
var sparse = sparseBlocks{
values: sparseValues[:],
offsets: sparseOffsets[:],
}
// Sparse block lookup code.
// valueRange is an entry in a sparse block.
type valueRange struct {
value uint16
lo, hi byte
}
type sparseBlocks struct {
values []valueRange
offsets []uint16
}
// lookup returns the value from values block n for byte b using binary search.
func (s *sparseBlocks) lookup(n uint32, b byte) uint16 {
lo := s.offsets[n]
hi := s.offsets[n+1]
for lo < hi {
m := lo + (hi-lo)/2
r := s.values[m]
if r.lo <= b && b <= r.hi {
return r.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}
// lastRuneForTesting is the last rune used for testing. Everything after this
// is boring.
const lastRuneForTesting = rune(0x1FFFF)

61
vendor/golang.org/x/text/cases/icu.go generated vendored Normal file

@@ -0,0 +1,61 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build icu
package cases
// Ideally these functions would be defined in a test file, but go test doesn't
// allow CGO in tests. The build tag should ensure either way that these
// functions will not end up in the package.
// TODO: Ensure that the correct ICU version is set.
/*
#cgo LDFLAGS: -licui18n.57 -licuuc.57
#include <stdlib.h>
#include <unicode/ustring.h>
#include <unicode/utypes.h>
#include <unicode/localpointer.h>
#include <unicode/ucasemap.h>
*/
import "C"
import "unsafe"
func doICU(tag, caser, input string) string {
err := C.UErrorCode(0)
loc := C.CString(tag)
cm := C.ucasemap_open(loc, C.uint32_t(0), &err)
buf := make([]byte, len(input)*4)
dst := (*C.char)(unsafe.Pointer(&buf[0]))
src := C.CString(input)
cn := C.int32_t(0)
switch caser {
case "fold":
cn = C.ucasemap_utf8FoldCase(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "lower":
cn = C.ucasemap_utf8ToLower(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "upper":
cn = C.ucasemap_utf8ToUpper(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
case "title":
cn = C.ucasemap_utf8ToTitle(cm,
dst, C.int32_t(len(buf)),
src, C.int32_t(len(input)),
&err)
}
return string(buf[:cn])
}

210
vendor/golang.org/x/text/cases/icu_test.go generated vendored Normal file

@@ -0,0 +1,210 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build icu
package cases
import (
"path"
"strings"
"testing"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/language"
"golang.org/x/text/unicode/norm"
)
func TestICUConformance(t *testing.T) {
// Build test set.
input := []string{
"a.a a_a",
"a\u05d0a",
"\u05d0'a",
"a\u03084a",
"a\u0308a",
"a3\u30a3a",
"a\u303aa",
"a_\u303a_a",
"1_a..a",
"1_a.a",
"a..a.",
"a--a-",
"a-a-",
"a\u200ba",
"a\u200b\u200ba",
"a\u00ad\u00ada", // Format
"a\u00ada",
"a''a", // SingleQuote
"a'a",
"a::a", // MidLetter
"a:a",
"a..a", // MidNumLet
"a.a",
"a;;a", // MidNum
"a;a",
"a__a", // ExtendNumlet
"a_a",
"ΟΣ''a",
}
add := func(x interface{}) {
switch v := x.(type) {
case string:
input = append(input, v)
case []string:
for _, s := range v {
input = append(input, s)
}
}
}
for _, tc := range testCases {
add(tc.src)
add(tc.lower)
add(tc.upper)
add(tc.title)
}
for _, tc := range bufferTests {
add(tc.src)
}
for _, tc := range breakTest {
add(strings.Replace(tc, "|", "", -1))
}
for _, tc := range foldTestCases {
add(tc)
}
// Compare ICU to Go.
for _, c := range []string{"lower", "upper", "title", "fold"} {
for _, tag := range []string{
"und", "af", "az", "el", "lt", "nl", "tr",
} {
for _, s := range input {
if exclude(c, tag, s) {
continue
}
testtext.Run(t, path.Join(c, tag, s), func(t *testing.T) {
want := doICU(tag, c, s)
got := doGo(tag, c, s)
if norm.NFC.String(got) != norm.NFC.String(want) {
t.Errorf("\n in %[3]q (%+[3]q)\n got %[1]q (%+[1]q)\n want %[2]q (%+[2]q)", got, want, s)
}
})
}
}
}
}
// exclude indicates if a string should be excluded from testing.
func exclude(cm, tag, s string) bool {
list := []struct{ cm, tags, pattern string }{
// TODO: Go does not handle certain esoteric breaks correctly. This will be
// fixed once we have a real word break iterator. Alternatively, it
// seems like we're not too far off from making it work, so we could
// fix these last steps. But first verify that using a separate word
// breaker does not hurt performance.
{"title", "af nl", "a''a"},
{"", "", "א'a"},
// All the exclusions below seem to be issues with the ICU
// implementation (at version 57) and thus are not marked as TODO.
// ICU does not handle leading apostrophe for Dutch and
// Afrikaans correctly. See http://unicode.org/cldr/trac/ticket/7078.
{"title", "af nl", "'n"},
{"title", "af nl", "'N"},
// Go terminates the final sigma check after a fixed number of
// ignorables have been found. This ensures that the algorithm can make
// progress in a streaming scenario.
{"lower title", "", "\u039f\u03a3...............................a"},
// This also applies to upper in Greek.
// NOTE: we could fix the following two cases by adding state to elUpper
// and aztrLower. However, considering a modifier to not belong to the
// preceding letter after the maximum modifiers count is reached is
// consistent with the behavior of unicode/norm.
{"upper", "el", "\u03bf" + strings.Repeat("\u0321", 29) + "\u0313"},
{"lower", "az tr lt", "I" + strings.Repeat("\u0321", 30) + "\u0307\u0300"},
{"upper", "lt", "i" + strings.Repeat("\u0321", 30) + "\u0307\u0300"},
{"lower", "lt", "I" + strings.Repeat("\u0321", 30) + "\u0300"},
// ICU title case seems to erroneously remove \u0307 from an upper case
// I unconditionally, instead of only when lowercasing. The ICU
// transform algorithm transforms these cases consistently with our
// implementation.
{"title", "az tr", "\u0307"},
// The spec says to remove \u0307 after Soft-Dotted characters. ICU
// transforms conform but ucasemap_utf8ToUpper does not.
{"upper title", "lt", "i\u0307"},
{"upper title", "lt", "i" + strings.Repeat("\u0321", 29) + "\u0307\u0300"},
// Both Unicode and CLDR prescribe an extra explicit dot above after a
// Soft_Dotted character if there are other modifiers.
// ucasemap_utf8ToUpper does not do this; ICU transforms do.
// The issue with ucasemap_utf8ToUpper seems to be that it does not
// consider the modifiers that are part of composition in the evaluation
// of More_Above. For instance, according to the More_Above rule for lt,
// a dotted capital I (U+0130) becomes i\u0307\u0307 (a small i with
// two additional dots). This seems odd, but is correct. ICU is
// definitely not correct as it produces different results for different
// normal forms. For instance, for an İ:
// \u0130 (NFC) -> i\u0307 (incorrect)
// I\u0307 (NFD) -> i\u0307\u0307 (correct)
// We could argue that we should not add a \u0307 if there already is
// one, but this may be hard to get correct and does not conform to the
// standard.
{"lower title", "lt", "\u0130"},
{"lower title", "lt", "\u00cf"},
// We would conform to ICU ucasemap_utf8ToUpper if we removed support for
// elUpper. However, this clearly does not conform to the spec. Moreover, the
// ICU transforms _do_ implement this transform and produce results
// consistent with our implementation. Note that we still prefer to use
// ucasemap_utf8ToUpper instead of transforms as the latter have
// inconsistencies in the word breaking algorithm.
{"upper", "el", "\u0386"}, // GREEK CAPITAL LETTER ALPHA WITH TONOS
{"upper", "el", "\u0389"}, // GREEK CAPITAL LETTER ETA WITH TONOS
{"upper", "el", "\u038A"}, // GREEK CAPITAL LETTER IOTA WITH TONOS
{"upper", "el", "\u0391"}, // GREEK CAPITAL LETTER ALPHA
{"upper", "el", "\u0397"}, // GREEK CAPITAL LETTER ETA
{"upper", "el", "\u0399"}, // GREEK CAPITAL LETTER IOTA
{"upper", "el", "\u03AC"}, // GREEK SMALL LETTER ALPHA WITH TONOS
{"upper", "el", "\u03AE"}, // GREEK SMALL LETTER ALPHA WITH ETA
{"upper", "el", "\u03AF"}, // GREEK SMALL LETTER ALPHA WITH IOTA
{"upper", "el", "\u03B1"}, // GREEK SMALL LETTER ALPHA
{"upper", "el", "\u03B7"}, // GREEK SMALL LETTER ETA
{"upper", "el", "\u03B9"}, // GREEK SMALL LETTER IOTA
}
for _, x := range list {
if x.cm != "" && strings.Index(x.cm, cm) == -1 {
continue
}
if x.tags != "" && strings.Index(x.tags, tag) == -1 {
continue
}
if strings.Index(s, x.pattern) != -1 {
return true
}
}
return false
}
func doGo(tag, caser, input string) string {
var c Caser
t := language.MustParse(tag)
switch caser {
case "lower":
c = Lower(t)
case "upper":
c = Upper(t)
case "title":
c = Title(t)
case "fold":
c = Fold()
}
return c.String(input)
}

82
vendor/golang.org/x/text/cases/info.go generated vendored Normal file
View file

@ -0,0 +1,82 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
func (c info) cccVal() info {
if c&exceptionBit != 0 {
return info(exceptions[c>>exceptionShift]) & cccMask
}
return c & cccMask
}
func (c info) cccType() info {
ccc := c.cccVal()
if ccc <= cccZero {
return cccZero
}
return ccc
}
// TODO: Implement full Unicode breaking algorithm:
// 1) Implement breaking in separate package.
// 2) Use the breaker here.
// 3) Compare table size and performance of using the more generic breaker.
//
// Note that we can extend the current algorithm to be much more accurate. This
// only makes sense, though, if the performance and/or space penalty of using
// the generic breaker is big. Extra data will only be needed for non-cased
// runes, which means there are sufficient bits left in the caseType.
// ICU prohibits breaking in such cases as well.
// For the purpose of title casing we use an approximation of the Unicode Word
// Breaking algorithm defined in Annex #29:
// http://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table.
//
// For our approximation, we group the Word Break types into the following
// categories, with associated rules:
//
// 1) Letter:
// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ.
// Rule: Never break between consecutive runes of this category.
//
// 2) Mid:
// MidLetter, MidNumLet, Single_Quote.
// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn,
// Me, Cf, Lm or Sk).
// Rule: Don't break between Letter and Mid, but break between two Mids.
//
// 3) Break:
// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and
// Other.
// These categories should always result in a break between two cased letters.
// Rule: Always break.
//
// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in
// preventing a break between two cased letters. For now we will ignore this
// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and
// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].)
//
// Note 2: the rule for Mid is very approximate, but works in most cases. To
// improve, we could store the categories in the trie value and use a FA to
// manage breaks. See TODO comment above.
//
// Note 3: according to the spec, it is possible for the Extend category to
// introduce breaks between other categories grouped in Letter. However, this
// is undesirable for our purposes. ICU prevents breaks in such cases as well.
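//
// Examples of the resulting behavior, taken from the tests in this package:
//
//	Title(language.Und).String("DON'T DO THiS") // "Don't Do This": ' is a Mid, so no break
//	Title(language.Und).String("ΟΣ''a")         // "Οσ''A": two adjacent Mids force a break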
// isBreak returns whether this rune should introduce a break.
func (c info) isBreak() bool {
return c.cccVal() == cccBreak
}
// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter,
// Numeric, ExtendNumLet, or Extend.
func (c info) isLetter() bool {
ccc := c.cccVal()
if ccc == cccZero {
return !c.isCaseIgnorable()
}
return ccc != cccBreak
}

816
vendor/golang.org/x/text/cases/map.go generated vendored Normal file

@ -0,0 +1,816 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
// This file contains the definitions of case mappings for all supported
// languages. The rules for the language-specific tailorings were taken and
// modified from the CLDR transform definitions in common/transforms.
import (
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/text/internal"
"golang.org/x/text/language"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// A mapFunc takes a context set to the current rune and writes the mapped
// version to the same context. It may advance the context to the next rune. It
// returns whether a checkpoint is possible: whether the pDst bytes written to
// dst so far won't need changing as we see more source bytes.
type mapFunc func(*context) bool
// A spanFunc takes a context set to the current rune and returns whether this
// rune would be altered when written to the output. It may advance the context
// to the next rune. It returns whether a checkpoint is possible.
type spanFunc func(*context) bool
// maxIgnorable defines the maximum number of ignorables to consider for
// lookahead operations.
const maxIgnorable = 30
// supported lists the language tags for which we have tailorings.
const supported = "und af az el lt nl tr"
func init() {
tags := []language.Tag{}
for _, s := range strings.Split(supported, " ") {
tags = append(tags, language.MustParse(s))
}
matcher = internal.NewInheritanceMatcher(tags)
Supported = language.NewCoverage(tags)
}
var (
matcher *internal.InheritanceMatcher
Supported language.Coverage
// We keep the following lists separate, instead of having a single per-
// language struct, to give the compiler a chance to remove unused code.
// Some uppercase mappers are stateless, so we can precompute the
// Transformers and save a bit on runtime allocations.
upperFunc = []struct {
upper mapFunc
span spanFunc
}{
{nil, nil}, // und
{nil, nil}, // af
{aztrUpper(upper), isUpper}, // az
{elUpper, noSpan}, // el
{ltUpper(upper), noSpan}, // lt
{nil, nil}, // nl
{aztrUpper(upper), isUpper}, // tr
}
undUpper transform.SpanningTransformer = &undUpperCaser{}
undLower transform.SpanningTransformer = &undLowerCaser{}
undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{}
lowerFunc = []mapFunc{
nil, // und
nil, // af
aztrLower, // az
nil, // el
ltLower, // lt
nil, // nl
aztrLower, // tr
}
titleInfos = []struct {
title mapFunc
lower mapFunc
titleSpan spanFunc
rewrite func(*context)
}{
{title, lower, isTitle, nil}, // und
{title, lower, isTitle, afnlRewrite}, // af
{aztrUpper(title), aztrLower, isTitle, nil}, // az
{title, lower, isTitle, nil}, // el
{ltUpper(title), ltLower, noSpan, nil}, // lt
{nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl
{aztrUpper(title), aztrLower, isTitle, nil}, // tr
}
)
func makeUpper(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t)
f := upperFunc[i].upper
if f == nil {
return undUpper
}
return &simpleCaser{f: f, span: upperFunc[i].span}
}
func makeLower(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t)
f := lowerFunc[i]
if f == nil {
if o.ignoreFinalSigma {
return undLowerIgnoreSigma
}
return undLower
}
if o.ignoreFinalSigma {
return &simpleCaser{f: f, span: isLower}
}
return &lowerCaser{
first: f,
midWord: finalSigma(f),
}
}
func makeTitle(t language.Tag, o options) transform.SpanningTransformer {
_, i, _ := matcher.Match(t)
x := &titleInfos[i]
lower := x.lower
if o.noLower {
lower = (*context).copy
} else if !o.ignoreFinalSigma {
lower = finalSigma(lower)
}
return &titleCaser{
title: x.title,
lower: lower,
titleSpan: x.titleSpan,
rewrite: x.rewrite,
}
}
func noSpan(c *context) bool {
c.err = transform.ErrEndOfSpan
return false
}
// TODO: consider a similar special case for the fast majority lower case. This
// is a bit more involved so will require some more precise benchmarking to
// justify it.
type undUpperCaser struct{ transform.NopResetter }
// undUpperCaser implements the Transformer interface for doing an upper case
// mapping for the root locale (und). It eliminates the need for an allocation
// as it prevents escaping by not using function pointers.
func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() {
upper(&c)
c.checkpoint()
}
return c.ret()
}
func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isUpper(&c) {
c.checkpoint()
}
return c.retSpan()
}
// undLowerIgnoreSigmaCaser implements the Transformer interface for doing
// a lower case mapping for the root locale (und) ignoring final sigma
// handling. This casing algorithm is used in some performance-critical packages
// like secure/precis and x/net/http/idna, which warrants its special-casing.
type undLowerIgnoreSigmaCaser struct{ transform.NopResetter }
func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() && lower(&c) {
c.checkpoint()
}
return c.ret()
}
// Span implements a generic lower-casing. This is possible as isLower works
// for all lowercasing variants. All lowercase variants only vary in how they
// transform a non-lowercase letter. They will never change an already lowercase
// letter. In addition, there is no state.
func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isLower(&c) {
c.checkpoint()
}
return c.retSpan()
}
type simpleCaser struct {
context
f mapFunc
span spanFunc
}
// simpleCaser implements the Transformer interface for doing a case operation
// on a rune-by-rune basis.
func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for c.next() && t.f(&c) {
c.checkpoint()
}
return c.ret()
}
func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && t.span(&c) {
c.checkpoint()
}
return c.retSpan()
}
// undLowerCaser implements the Transformer interface for doing a lower case
// mapping for the root locale (und), including Greek final sigma handling.
// Unlike undLowerIgnoreSigmaCaser above, it tracks word boundaries so that a
// word-final capital sigma is mapped to ς rather than σ.
type undLowerCaser struct{ transform.NopResetter }
func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
c := context{dst: dst, src: src, atEOF: atEOF}
for isInterWord := true; c.next(); {
if isInterWord {
if c.info.isCased() {
if !lower(&c) {
break
}
isInterWord = false
} else if !c.copy() {
break
}
} else {
if c.info.isNotCasedAndNotCaseIgnorable() {
if !c.copy() {
break
}
isInterWord = true
} else if !c.hasPrefix("Σ") {
if !lower(&c) {
break
}
} else if !finalSigmaBody(&c) {
break
}
}
c.checkpoint()
}
return c.ret()
}
func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) {
c := context{src: src, atEOF: atEOF}
for c.next() && isLower(&c) {
c.checkpoint()
}
return c.retSpan()
}
// lowerCaser implements the Transformer interface. The default Unicode lower
// casing requires different treatment for the first and subsequent characters
// of a word, most notably to handle the Greek final Sigma.
type lowerCaser struct {
undLowerIgnoreSigmaCaser
context
first, midWord mapFunc
}
func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
t.context = context{dst: dst, src: src, atEOF: atEOF}
c := &t.context
for isInterWord := true; c.next(); {
if isInterWord {
if c.info.isCased() {
if !t.first(c) {
break
}
isInterWord = false
} else if !c.copy() {
break
}
} else {
if c.info.isNotCasedAndNotCaseIgnorable() {
if !c.copy() {
break
}
isInterWord = true
} else if !t.midWord(c) {
break
}
}
c.checkpoint()
}
return c.ret()
}
// titleCaser implements the Transformer interface. Title casing algorithms
// distinguish between the first letter of a word and subsequent letters of the
// same word. The caser uses state to avoid requiring a potentially infinite lookahead.
type titleCaser struct {
context
// rune mappings used by the actual casing algorithms.
title mapFunc
lower mapFunc
titleSpan spanFunc
rewrite func(*context)
}
// Transform implements the standard Unicode title case algorithm as defined in
// Chapter 3 of The Unicode Standard:
// toTitlecase(X): Find the word boundaries in X according to Unicode Standard
// Annex #29, "Unicode Text Segmentation." For each word boundary, find the
// first cased character F following the word boundary. If F exists, map F to
// Titlecase_Mapping(F); then map all characters C between F and the following
// word boundary to Lowercase_Mapping(C).
func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord}
c := &t.context
if !c.next() {
return c.ret()
}
for {
p := c.info
if t.rewrite != nil {
t.rewrite(c)
}
wasMid := p.isMid()
// Break out of this loop on failure to ensure we do not modify the
// state incorrectly.
if p.isCased() {
if !c.isMidWord {
if !t.title(c) {
break
}
c.isMidWord = true
} else if !t.lower(c) {
break
}
} else if !c.copy() {
break
} else if p.isBreak() {
c.isMidWord = false
}
// As we save the state of the transformer, it is safe to call
// checkpoint after any successful write.
if !(c.isMidWord && wasMid) {
c.checkpoint()
}
if !c.next() {
break
}
if wasMid && c.info.isMid() {
c.isMidWord = false
}
}
return c.ret()
}
func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) {
t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord}
c := &t.context
if !c.next() {
return c.retSpan()
}
for {
p := c.info
if t.rewrite != nil {
t.rewrite(c)
}
wasMid := p.isMid()
// Break out of this loop on failure to ensure we do not modify the
// state incorrectly.
if p.isCased() {
if !c.isMidWord {
if !t.titleSpan(c) {
break
}
c.isMidWord = true
} else if !isLower(c) {
break
}
} else if p.isBreak() {
c.isMidWord = false
}
// As we save the state of the transformer, it is safe to call
// checkpoint after any successful write.
if !(c.isMidWord && wasMid) {
c.checkpoint()
}
if !c.next() {
break
}
if wasMid && c.info.isMid() {
c.isMidWord = false
}
}
return c.retSpan()
}
// finalSigma adds Greek final Sigma handling to another casing function. It
// determines whether a lowercased sigma should be σ or ς, by looking ahead for
// case-ignorables and a cased letter.
func finalSigma(f mapFunc) mapFunc {
return func(c *context) bool {
if !c.hasPrefix("Σ") {
return f(c)
}
return finalSigmaBody(c)
}
}
func finalSigmaBody(c *context) bool {
// Current rune must be Σ.
// ::NFD();
// # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA
// Σ } [:case-ignorable:]* [:cased:] → σ;
// [:cased:] [:case-ignorable:]* { Σ → ς;
// ::Any-Lower;
// ::NFC();
p := c.pDst
c.writeString("ς")
// TODO: we should do this here, but right now this will never have an
// effect as this is called when the prefix is Sigma, whereas Dutch and
// Afrikaans only test for an apostrophe.
//
// if t.rewrite != nil {
// t.rewrite(c)
// }
// We need to do one more iteration after maxIgnorable, as a cased
// letter is not an ignorable and may modify the result.
wasMid := false
for i := 0; i < maxIgnorable+1; i++ {
if !c.next() {
return false
}
if !c.info.isCaseIgnorable() {
// All Midword runes are also case ignorable, so we are
// guaranteed to have a letter or word break here. As we are
// unreading the rune, there is no need to unset c.isMidWord;
// the title caser will handle this.
if c.info.isCased() {
// p+1 is guaranteed to be in bounds: if writing ς was
// successful, p+1 will contain the second byte of ς. If not,
// this function will have returned after c.next returned false.
c.dst[p+1]++ // ς → σ
}
c.unreadRune()
return true
}
// A case ignorable may also introduce a word break, so we may need
// to continue searching even after detecting a break.
isMid := c.info.isMid()
if (wasMid && isMid) || c.info.isBreak() {
c.isMidWord = false
}
wasMid = isMid
c.copy()
}
return true
}
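// Illustrative examples, taken from the test cases in this package:
//
//	Lower(language.Und).String("ΕΣΆΣ")                          // "εσάς": the final Σ becomes ς
//	Lower(language.Und, HandleFinalSigma(false)).String("ΕΣΆΣ") // "εσάσ"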
// finalSigmaSpan would be the same as isLower.
// elUpper implements Greek upper casing, which entails removing a predefined
// set of non-blocked modifiers. Note that these accents should not be removed
// for title casing!
// Example: "Οδός" -> "ΟΔΟΣ".
func elUpper(c *context) bool {
// From CLDR:
// [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ;
// [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ;
r, _ := utf8.DecodeRune(c.src[c.pSrc:])
oldPDst := c.pDst
if !upper(c) {
return false
}
if !unicode.Is(unicode.Greek, r) {
return true
}
i := 0
// Take the properties of the uppercased rune that is already written to the
// destination. This saves us the trouble of having to uppercase the
// decomposed rune again.
if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil {
// Restore the destination position and process the decomposed rune.
r, sz := utf8.DecodeRune(b)
if r <= 0xFF { // See A.6.1
return true
}
c.pDst = oldPDst
// Insert the first rune and ignore the modifiers. See A.6.2.
c.writeBytes(b[:sz])
i = len(b[sz:]) / 2 // Greek modifiers are always of length 2.
}
for ; i < maxIgnorable && c.next(); i++ {
switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r {
// Above and Iota Subscript
case 0x0300, // U+0300 COMBINING GRAVE ACCENT
0x0301, // U+0301 COMBINING ACUTE ACCENT
0x0304, // U+0304 COMBINING MACRON
0x0306, // U+0306 COMBINING BREVE
0x0308, // U+0308 COMBINING DIAERESIS
0x0313, // U+0313 COMBINING COMMA ABOVE
0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE
0x0342, // U+0342 COMBINING GREEK PERISPOMENI
0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI
// No-op. Gobble the modifier.
default:
switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() {
case cccZero:
c.unreadRune()
return true
// We don't need to test for IotaSubscript as the only rune that
// qualifies (U+0345) was already excluded in the switch statement
// above. See A.4.
case cccAbove:
return c.copy()
default:
// Some other modifier. We're still allowed to gobble Greek
// modifiers after this.
c.copy()
}
}
}
return i == maxIgnorable
}
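// Illustrative examples, taken from the test cases in this package:
//
//	Upper(language.Greek).String("Οδός")   // "ΟΔΟΣ"
//	Upper(language.Greek).String("\u03ac") // "\u0391" (ά → Α: the tonos is removed)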
// TODO: implement elUpperSpan (low-priority: complex and infrequent).
func ltLower(c *context) bool {
// From CLDR:
// # Introduce an explicit dot above when lowercasing capital I's and J's
// # whenever there are more accents above.
// # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek)
// # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I
// # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J
// # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK
// # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE
// # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE
// # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE
// ::NFD();
// I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307;
// J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307;
// I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307;
// I \u0300 (Ì) → i \u0307 \u0300;
// I \u0301 (Í) → i \u0307 \u0301;
// I \u0303 (Ĩ) → i \u0307 \u0303;
// ::Any-Lower();
// ::NFC();
i := 0
if r := c.src[c.pSrc]; r < utf8.RuneSelf {
lower(c)
if r != 'I' && r != 'J' {
return true
}
} else {
p := norm.NFD.Properties(c.src[c.pSrc:])
if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') {
// UTF-8 optimization: the decomposition will only have an above
// modifier if the last rune of the decomposition is in [U+300-U+311].
// In all other cases, a decomposition starting with I is always
// an I followed by modifiers that are not cased themselves. See A.2.
if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4.
if !c.writeBytes(d[:1]) {
return false
}
c.dst[c.pDst-1] += 'a' - 'A' // lower
// Assumption: modifier never changes on lowercase. See A.1.
// Assumption: all modifiers added have CCC = Above. See A.2.3.
return c.writeString("\u0307") && c.writeBytes(d[1:])
}
// In all other cases the additional modifiers will have a CCC
// that is less than 230 (Above). We will insert the U+0307, if
// needed, after these modifiers so that a string in FCD form
// will remain so. See A.2.2.
lower(c)
i = 1
} else {
return lower(c)
}
}
for ; i < maxIgnorable && c.next(); i++ {
switch c.info.cccType() {
case cccZero:
c.unreadRune()
return true
case cccAbove:
return c.writeString("\u0307") && c.copy() // See A.1.
default:
c.copy() // See A.1.
}
}
return i == maxIgnorable
}
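// Illustrative example, taken from the test cases: lowercasing U+00CC (Ì) in
// Lithuanian inserts an explicit dot above before the grave accent:
//
//	Lower(language.Lithuanian).String("\u00cc") // "i\u0307\u0300"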
// ltLowerSpan would be the same as isLower.
func ltUpper(f mapFunc) mapFunc {
return func(c *context) bool {
// Unicode:
// 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE
//
// From CLDR:
// # Remove \u0307 following soft-dotteds (i, j, and the like), with possible
// # intervening non-230 marks.
// ::NFD();
// [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ;
// ::Any-Upper();
// ::NFC();
// TODO: See A.5. A soft-dotted rune never has an exception. This would
// allow us to overload the exception bit and encode this property in
// info. Need to measure performance impact of this.
r, _ := utf8.DecodeRune(c.src[c.pSrc:])
oldPDst := c.pDst
if !f(c) {
return false
}
if !unicode.Is(unicode.Soft_Dotted, r) {
return true
}
// We don't need to do an NFD normalization, as a soft-dotted rune never
// contains U+0307. See A.3.
i := 0
for ; i < maxIgnorable && c.next(); i++ {
switch c.info.cccType() {
case cccZero:
c.unreadRune()
return true
case cccAbove:
if c.hasPrefix("\u0307") {
// We don't do a full NFC, but rather combine runes for
// some of the common cases. (Returning NFC or
// preserving normal form is neither a requirement nor
// a possibility anyway).
if !c.next() {
return false
}
if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc {
s := ""
switch c.src[c.pSrc+1] {
case 0x80: // U+0300 COMBINING GRAVE ACCENT
s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE
case 0x81: // U+0301 COMBINING ACUTE ACCENT
s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE
case 0x83: // U+0303 COMBINING TILDE
s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE
case 0x88: // U+0308 COMBINING DIAERESIS
s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS
default:
}
if s != "" {
c.pDst = oldPDst
return c.writeString(s)
}
}
}
return c.copy()
default:
c.copy()
}
}
return i == maxIgnorable
}
}
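// Illustrative example, taken from the buffer tests: uppercasing in Lithuanian
// removes the explicit dot above following a soft-dotted i:
//
//	Upper(language.Lithuanian).String("i\u0307\u0300") // "\u00cc" (Ì)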
// TODO: implement ltUpperSpan (low priority: complex and infrequent).
func aztrUpper(f mapFunc) mapFunc {
return func(c *context) bool {
// i→İ;
if c.src[c.pSrc] == 'i' {
return c.writeString("İ")
}
return f(c)
}
}
func aztrLower(c *context) (done bool) {
// From CLDR:
// # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri
// # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE
// İ→i;
// # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i.
// # This matches the behavior of the canonically equivalent I-dot_above
// # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE
// # When lowercasing, unless an I is before a dot_above, it turns into a dotless i.
// # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I
// I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ;
// I→ı ;
// ::Any-Lower();
if c.hasPrefix("\u0130") { // İ
return c.writeString("i")
}
if c.src[c.pSrc] != 'I' {
return lower(c)
}
// We ignore the lower-case I for now, but insert it later when we know
// which form we need.
start := c.pSrc + c.sz
i := 0
Loop:
// We check for up to n ignorables before \u0307. As \u0307 is an
// ignorable as well, n is maxIgnorable-1.
for ; i < maxIgnorable && c.next(); i++ {
switch c.info.cccType() {
case cccAbove:
if c.hasPrefix("\u0307") {
return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307
}
done = true
break Loop
case cccZero:
c.unreadRune()
done = true
break Loop
default:
// We'll write this rune after we know which starter to use.
}
}
if i == maxIgnorable {
done = true
}
return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done
}
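// Illustrative examples for Turkish and Azeri, taken from the test cases:
//
//	Upper(language.Turkish).String("i")       // "İ"
//	Lower(language.Turkish).String("İ")       // "i"
//	Lower(language.Turkish).String("I")       // "ı" (dotless i; no dot above follows)
//	Lower(language.Turkish).String("I\u0307") // "i" (the combining dot above is absorbed)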
// aztrLowerSpan would be the same as isLower.
func nlTitle(c *context) bool {
// From CLDR:
// # Special titlecasing for Dutch initial "ij".
// ::Any-Title();
// # Fix up Ij at the beginning of a "word" (per Any-Title, not UAX #29)
// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' {
return title(c)
}
if !c.writeString("I") || !c.next() {
return false
}
if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' {
return c.writeString("J")
}
c.unreadRune()
return true
}
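// Illustrative example, taken from the test cases:
//
//	Title(language.Dutch).String("ijs Ijs") // "IJs IJs"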
func nlTitleSpan(c *context) bool {
// From CLDR:
// # Special titlecasing for Dutch initial "ij".
// ::Any-Title();
// # Fix up Ij at the beginning of a "word" (per Any-Title, not UAX #29)
// [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ;
if c.src[c.pSrc] != 'I' {
return isTitle(c)
}
if !c.next() || c.src[c.pSrc] == 'j' {
return false
}
if c.src[c.pSrc] != 'J' {
c.unreadRune()
}
return true
}
// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078.
func afnlRewrite(c *context) {
if c.hasPrefix("'") || c.hasPrefix("\u2019") { // ASCII apostrophe or U+2019 right single quotation mark
c.isMidWord = true
}
}
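// Illustrative example, taken from the test cases: the leading apostrophe is
// treated as mid-word, so the letter following it is not title cased:
//
//	Title(language.Afrikaans).String("wag 'n bietjie") // "Wag 'n Bietjie"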

950
vendor/golang.org/x/text/cases/map_test.go generated vendored Normal file

@ -0,0 +1,950 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cases
import (
"bytes"
"fmt"
"path"
"strings"
"testing"
"unicode/utf8"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/language"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
type testCase struct {
lang string
src interface{} // string, []string, or nil to skip test
title interface{} // string, []string, or nil to skip test
lower interface{} // string, []string, or nil to skip test
upper interface{} // string, []string, or nil to skip test
opts options
}
var testCases = []testCase{
0: {
lang: "und",
src: "abc aBc ABC abC İsıI ΕΣΆΣ",
title: "Abc Abc Abc Abc İsıi Εσάσ",
lower: "abc abc abc abc i\u0307sıi εσάσ",
upper: "ABC ABC ABC ABC İSII ΕΣΆΣ",
opts: getOpts(HandleFinalSigma(false)),
},
1: {
lang: "und",
src: "abc aBc ABC abC İsıI ΕΣΆΣ Σ _Σ -Σ",
title: "Abc Abc Abc Abc İsıi Εσάς Σ _Σ -Σ",
lower: "abc abc abc abc i\u0307sıi εσάς σ _σ -σ",
upper: "ABC ABC ABC ABC İSII ΕΣΆΣ Σ _Σ -Σ",
opts: getOpts(HandleFinalSigma(true)),
},
2: { // Title cased runes.
lang: supported,
src: "DžA",
title: "Dža",
lower: "dža",
upper: "DŽA",
},
3: {
// Title breaking.
lang: supported,
src: []string{
"FOO CASE TEST",
"DON'T DO THiS",
"χωΡΊΣ χωΡΊΣ^a χωΡΊΣ:a χωΡΊΣ:^a χωΡΊΣ^ όμΩΣ Σ",
"with-hyphens",
"49ers 49ers",
`"capitalize a^a -hyphen 0X _u a_u:a`,
"MidNumLet a.b\u2018c\u2019d\u2024e\ufe52f\uff07f\uff0eg",
"MidNum a,b;c\u037ed\u0589e\u060cf\u2044g\ufe50h",
"\u0345 x\u3031x x\u05d0x \u05d0x a'.a a.a a4,a",
},
title: []string{
"Foo Case Test",
"Don't Do This",
"Χωρίς Χωρίσ^A Χωρίσ:a Χωρίσ:^A Χωρίς^ Όμως Σ",
"With-Hyphens",
// Note that 49Ers is correct according to the spec.
// TODO: provide some option to the user to treat different
// characters as cased.
"49Ers 49Ers",
`"Capitalize A^A -Hyphen 0X _U A_u:a`,
"Midnumlet A.b\u2018c\u2019d\u2024e\ufe52f\uff07f\uff0eg",
"Midnum A,B;C\u037eD\u0589E\u060cF\u2044G\ufe50H",
"\u0399 X\u3031X X\u05d0x \u05d0X A'.A A.a A4,A",
},
},
// TODO: These are known deviations from the options{} Unicode Word Breaking
// Algorithm.
// {
// "und",
// "x_\u3031_x a4,4a",
// "X_\u3031_x A4,4a", // Currently is "X_\U3031_X A4,4A".
// "x_\u3031_x a4,4a",
// "X_\u3031_X A4,4A",
// options{},
// },
4: {
// Tests title options
lang: "und",
src: "abc aBc ABC abC İsıI o'Brien",
title: "Abc ABc ABC AbC İsıI O'Brien",
opts: getOpts(NoLower),
},
5: {
lang: "el",
src: "aBc ΟΔΌΣ Οδός Σο ΣΟ Σ oΣ ΟΣ σ ἕξ \u03ac",
title: "Abc Οδός Οδός Σο Σο Σ Oς Ος Σ Ἕξ \u0386",
lower: "abc οδός οδός σο σο σ oς ος σ ἕξ \u03ac",
upper: "ABC ΟΔΟΣ ΟΔΟΣ ΣΟ ΣΟ Σ OΣ ΟΣ Σ ΕΞ \u0391", // Uppercase removes accents
},
6: {
lang: "tr az",
src: "Isiİ İsıI I\u0307sIiİ İsıI\u0307 I\u0300\u0307",
title: "Isii İsıı I\u0307sıii İsıi I\u0300\u0307",
lower: "ısii isıı isıii isıi \u0131\u0300\u0307",
upper: "ISİİ İSII I\u0307SIİİ İSII\u0307 I\u0300\u0307",
},
7: {
lang: "lt",
src: "I Ï J J̈ Į Į̈ Ì Í Ĩ xi̇̈ xj̇̈ xį̇̈ xi̇̀ xi̇́ xi̇̃ XI XÏ XJ XJ̈ XĮ XĮ̈ XI̟̤",
title: "I Ï J J̈ Į Į̈ Ì Í Ĩ Xi̇̈ Xj̇̈ Xį̇̈ Xi̇̀ Xi̇́ Xi̇̃ Xi Xi̇̈ Xj Xj̇̈ Xį Xį̇̈ Xi̟̤",
lower: "i i̇̈ j j̇̈ į į̇̈ i̇̀ i̇́ i̇̃ xi̇̈ xj̇̈ xį̇̈ xi̇̀ xi̇́ xi̇̃ xi xi̇̈ xj xj̇̈ xį xį̇̈ xi̟̤",
upper: "I Ï J J̈ Į Į̈ Ì Í Ĩ XÏ XJ̈ XĮ̈ XÌ XÍ XĨ XI XÏ XJ XJ̈ XĮ XĮ̈ XI̟̤",
},
8: {
lang: "lt",
src: "\u012e\u0300 \u00cc i\u0307\u0300 i\u0307\u0301 i\u0307\u0303 i\u0307\u0308 i\u0300\u0307",
title: "\u012e\u0300 \u00cc \u00cc \u00cd \u0128 \u00cf I\u0300\u0307",
lower: "\u012f\u0307\u0300 i\u0307\u0300 i\u0307\u0300 i\u0307\u0301 i\u0307\u0303 i\u0307\u0308 i\u0300\u0307",
upper: "\u012e\u0300 \u00cc \u00cc \u00cd \u0128 \u00cf I\u0300\u0307",
},
9: {
lang: "nl",
src: "ijs IJs Ij Ijs İJ İJs aa aA 'ns 'S",
title: "IJs IJs IJ IJs İj İjs Aa Aa 'ns 's",
},
// Note: this specification is not currently part of CLDR. The same holds
// for the leading apostrophe handling for Dutch.
// See http://unicode.org/cldr/trac/ticket/7078.
10: {
lang: "af",
src: "wag 'n bietjie",
title: "Wag 'n Bietjie",
lower: "wag 'n bietjie",
upper: "WAG 'N BIETJIE",
},
}
func TestCaseMappings(t *testing.T) {
for i, tt := range testCases {
src, ok := tt.src.([]string)
if !ok {
src = strings.Split(tt.src.(string), " ")
}
for _, lang := range strings.Split(tt.lang, " ") {
tag := language.MustParse(lang)
testEntry := func(name string, mk func(language.Tag, options) transform.SpanningTransformer, gold interface{}) {
c := Caser{mk(tag, tt.opts)}
if gold != nil {
wants, ok := gold.([]string)
if !ok {
wants = strings.Split(gold.(string), " ")
}
for j, want := range wants {
if got := c.String(src[j]); got != want {
t.Errorf("%d:%s:\n%s.String(%+q):\ngot %+q;\nwant %+q", i, lang, name, src[j], got, want)
}
}
}
dst := make([]byte, 256) // big enough to hold any result
src := []byte(strings.Join(src, " "))
v := testtext.AllocsPerRun(20, func() {
c.Transform(dst, src, true)
})
if v > 1.1 {
t.Errorf("%d:%s:\n%s: number of allocs was %f; want 0", i, lang, name, v)
}
}
testEntry("Upper", makeUpper, tt.upper)
testEntry("Lower", makeLower, tt.lower)
testEntry("Title", makeTitle, tt.title)
}
}
}
// TestAlloc tests that some mapping methods do not cause any allocations.
func TestAlloc(t *testing.T) {
dst := make([]byte, 256) // big enough to hold any result
src := []byte(txtNonASCII)
for i, f := range []func() Caser{
func() Caser { return Upper(language.Und) },
func() Caser { return Lower(language.Und) },
func() Caser { return Lower(language.Und, HandleFinalSigma(false)) },
// TODO: use a shared copy for these casers as well, in order of
// importance, starting with the most important:
// func() Caser { return Title(language.Und) },
// func() Caser { return Title(language.Und, HandleFinalSigma(false)) },
} {
testtext.Run(t, "", func(t *testing.T) {
var c Caser
v := testtext.AllocsPerRun(10, func() {
c = f()
})
if v > 0 {
// TODO: Right now only Upper has 1 allocation. Special-case Lower
// and Title as well to have fewer allocations for the root locale.
t.Errorf("%d:init: number of allocs was %f; want 0", i, v)
}
v = testtext.AllocsPerRun(2, func() {
c.Transform(dst, src, true)
})
if v > 0 {
t.Errorf("%d:transform: number of allocs was %f; want 0", i, v)
}
})
}
}
func testHandover(t *testing.T, c Caser, src string) {
want := c.String(src)
// Find the common prefix.
pSrc := 0
for ; pSrc < len(src) && pSrc < len(want) && want[pSrc] == src[pSrc]; pSrc++ {
}
// Test handover for each substring of the prefix.
for i := 0; i < pSrc; i++ {
testtext.Run(t, fmt.Sprint("interleave/", i), func(t *testing.T) {
dst := make([]byte, 4*len(src))
c.Reset()
nSpan, _ := c.Span([]byte(src[:i]), false)
copy(dst, src[:nSpan])
nTransform, _, _ := c.Transform(dst[nSpan:], []byte(src[nSpan:]), true)
got := string(dst[:nSpan+nTransform])
if got != want {
t.Errorf("full string: got %q; want %q", got, want)
}
})
}
}
func TestHandover(t *testing.T) {
testCases := []struct {
desc string
t Caser
first, second string
}{{
"title/nosigma/single midword",
Title(language.Und, HandleFinalSigma(false)),
"A.", "a",
}, {
"title/nosigma/single midword",
Title(language.Und, HandleFinalSigma(false)),
"A", ".a",
}, {
"title/nosigma/double midword",
Title(language.Und, HandleFinalSigma(false)),
"A..", "a",
}, {
"title/nosigma/double midword",
Title(language.Und, HandleFinalSigma(false)),
"A.", ".a",
}, {
"title/nosigma/double midword",
Title(language.Und, HandleFinalSigma(false)),
"A", "..a",
}, {
"title/sigma/single midword",
Title(language.Und),
"ΟΣ.", "a",
}, {
"title/sigma/single midword",
Title(language.Und),
"ΟΣ", ".a",
}, {
"title/sigma/double midword",
Title(language.Und),
"ΟΣ..", "a",
}, {
"title/sigma/double midword",
Title(language.Und),
"ΟΣ.", ".a",
}, {
"title/sigma/double midword",
Title(language.Und),
"ΟΣ", "..a",
}, {
"title/af/leading apostrophe",
Title(language.Afrikaans),
"'", "n bietje",
}}
for _, tc := range testCases {
testtext.Run(t, tc.desc, func(t *testing.T) {
src := tc.first + tc.second
want := tc.t.String(src)
tc.t.Reset()
n, _ := tc.t.Span([]byte(tc.first), false)
dst := make([]byte, len(want))
copy(dst, tc.first[:n])
nDst, _, _ := tc.t.Transform(dst[n:], []byte(src[n:]), true)
got := string(dst[:n+nDst])
if got != want {
t.Errorf("got %q; want %q", got, want)
}
})
}
}
// minBufSize is the size of the buffer by which the casing operations in
// this package are guaranteed to make progress.
const minBufSize = norm.MaxSegmentSize
type bufferTest struct {
desc, src, want string
firstErr error
dstSize, srcSize int
t transform.SpanningTransformer
}
var bufferTests []bufferTest
func init() {
bufferTests = []bufferTest{{
desc: "und/upper/short dst",
src: "abcdefg",
want: "ABCDEFG",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Upper(language.Und),
}, {
desc: "und/upper/short src",
src: "123é56",
want: "123É56",
firstErr: transform.ErrShortSrc,
dstSize: 4,
srcSize: 4,
t: Upper(language.Und),
}, {
desc: "und/upper/no error on short",
src: "12",
want: "12",
firstErr: nil,
dstSize: 1,
srcSize: 1,
t: Upper(language.Und),
}, {
desc: "und/lower/short dst",
src: "ABCDEFG",
want: "abcdefg",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Lower(language.Und),
}, {
desc: "und/lower/short src",
src: "123É56",
want: "123é56",
firstErr: transform.ErrShortSrc,
dstSize: 4,
srcSize: 4,
t: Lower(language.Und),
}, {
desc: "und/lower/no error on short",
src: "12",
want: "12",
firstErr: nil,
dstSize: 1,
srcSize: 1,
t: Lower(language.Und),
}, {
desc: "und/lower/simple (no final sigma)",
src: "ΟΣ ΟΣΣ",
want: "οσ οσσ",
dstSize: minBufSize,
srcSize: minBufSize,
t: Lower(language.Und, HandleFinalSigma(false)),
}, {
desc: "und/title/simple (no final sigma)",
src: "ΟΣ ΟΣΣ",
want: "Οσ Οσσ",
dstSize: minBufSize,
srcSize: minBufSize,
t: Title(language.Und, HandleFinalSigma(false)),
}, {
desc: "und/title/final sigma: no error",
src: "ΟΣ",
want: "Ος",
dstSize: minBufSize,
srcSize: minBufSize,
t: Title(language.Und),
}, {
desc: "und/title/final sigma: short source",
src: "ΟΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣ",
want: "Οσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσς",
firstErr: transform.ErrShortSrc,
dstSize: minBufSize,
srcSize: 10,
t: Title(language.Und),
}, {
desc: "und/title/final sigma: short destination 1",
src: "ΟΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣ",
want: "Οσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσς",
firstErr: transform.ErrShortDst,
dstSize: 10,
srcSize: minBufSize,
t: Title(language.Und),
}, {
desc: "und/title/final sigma: short destination 2",
src: "ΟΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣ",
want: "Οσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσς",
firstErr: transform.ErrShortDst,
dstSize: 9,
srcSize: minBufSize,
t: Title(language.Und),
}, {
desc: "und/title/final sigma: short destination 3",
src: "ΟΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣΣ",
want: "Οσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσς",
firstErr: transform.ErrShortDst,
dstSize: 8,
srcSize: minBufSize,
t: Title(language.Und),
}, {
desc: "und/title/clipped UTF-8 rune",
src: "σσσσσσσσσσσ",
want: "Σσσσσσσσσσσ",
firstErr: transform.ErrShortSrc,
dstSize: minBufSize,
srcSize: 5,
t: Title(language.Und),
}, {
desc: "und/title/clipped UTF-8 rune atEOF",
src: "σσσ" + string([]byte{0xCF}),
want: "Σσσ" + string([]byte{0xCF}),
dstSize: minBufSize,
srcSize: minBufSize,
t: Title(language.Und),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/final sigma: max ignorables",
src: "ΟΣ" + strings.Repeat(".", maxIgnorable) + "a",
want: "Οσ" + strings.Repeat(".", maxIgnorable) + "A",
dstSize: minBufSize,
srcSize: minBufSize,
t: Title(language.Und),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/long string",
src: "AA" + strings.Repeat(".", maxIgnorable+1) + "a",
want: "Aa" + strings.Repeat(".", maxIgnorable+1) + "A",
dstSize: minBufSize,
srcSize: len("AA" + strings.Repeat(".", maxIgnorable+1)),
t: Title(language.Und),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/final sigma: too many ignorables",
src: "ΟΣ" + strings.Repeat(".", maxIgnorable+1) + "a",
want: "Ος" + strings.Repeat(".", maxIgnorable+1) + "A",
dstSize: minBufSize,
srcSize: len("ΟΣ" + strings.Repeat(".", maxIgnorable+1)),
t: Title(language.Und),
}, {
desc: "und/title/final sigma: apostrophe",
src: "ΟΣ''a",
want: "Οσ''A",
dstSize: minBufSize,
srcSize: minBufSize,
t: Title(language.Und),
}, {
desc: "el/upper/max ignorables",
src: "ο" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0313",
want: "Ο" + strings.Repeat("\u0321", maxIgnorable-1),
dstSize: minBufSize,
srcSize: minBufSize,
t: Upper(language.Greek),
}, {
desc: "el/upper/too many ignorables",
src: "ο" + strings.Repeat("\u0321", maxIgnorable) + "\u0313",
want: "Ο" + strings.Repeat("\u0321", maxIgnorable) + "\u0313",
dstSize: minBufSize,
srcSize: len("ο" + strings.Repeat("\u0321", maxIgnorable)),
t: Upper(language.Greek),
}, {
desc: "el/upper/short dst",
src: "123ο",
want: "123Ο",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Upper(language.Greek),
}, {
desc: "lt/lower/max ignorables",
src: "I" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0300",
want: "i" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0307\u0300",
dstSize: minBufSize,
srcSize: minBufSize,
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/too many ignorables",
src: "I" + strings.Repeat("\u0321", maxIgnorable) + "\u0300",
want: "i" + strings.Repeat("\u0321", maxIgnorable) + "\u0300",
dstSize: minBufSize,
srcSize: len("I" + strings.Repeat("\u0321", maxIgnorable)),
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/decomposition with short dst buffer 1",
src: "aaaaa\u00cc", // U+00CC LATIN CAPITAL LETTER I GRAVE
firstErr: transform.ErrShortDst,
want: "aaaaai\u0307\u0300",
dstSize: 5,
srcSize: minBufSize,
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/decomposition with short dst buffer 2",
src: "aaaa\u00cc", // U+00CC LATIN CAPITAL LETTER I GRAVE
firstErr: transform.ErrShortDst,
want: "aaaai\u0307\u0300",
dstSize: 5,
srcSize: minBufSize,
t: Lower(language.Lithuanian),
}, {
desc: "lt/upper/max ignorables",
src: "i" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0307\u0300",
want: "I" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0300",
dstSize: minBufSize,
srcSize: minBufSize,
t: Upper(language.Lithuanian),
}, {
desc: "lt/upper/too many ignorables",
src: "i" + strings.Repeat("\u0321", maxIgnorable) + "\u0307\u0300",
want: "I" + strings.Repeat("\u0321", maxIgnorable) + "\u0307\u0300",
dstSize: minBufSize,
srcSize: len("i" + strings.Repeat("\u0321", maxIgnorable)),
t: Upper(language.Lithuanian),
}, {
desc: "lt/upper/short dst",
src: "12i\u0307\u0300",
want: "12\u00cc",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Upper(language.Lithuanian),
}, {
desc: "aztr/lower/max ignorables",
src: "I" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0307\u0300",
want: "i" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0300",
dstSize: minBufSize,
srcSize: minBufSize,
t: Lower(language.Turkish),
}, {
desc: "aztr/lower/too many ignorables",
src: "I" + strings.Repeat("\u0321", maxIgnorable) + "\u0307\u0300",
want: "\u0131" + strings.Repeat("\u0321", maxIgnorable) + "\u0307\u0300",
dstSize: minBufSize,
srcSize: len("I" + strings.Repeat("\u0321", maxIgnorable)),
t: Lower(language.Turkish),
}, {
desc: "nl/title/pre-IJ cutoff",
src: " ij",
want: " IJ",
firstErr: transform.ErrShortDst,
dstSize: 2,
srcSize: minBufSize,
t: Title(language.Dutch),
}, {
desc: "nl/title/mid-IJ cutoff",
src: " ij",
want: " IJ",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Title(language.Dutch),
}, {
desc: "af/title/apostrophe",
src: "'n bietje",
want: "'n Bietje",
firstErr: transform.ErrShortDst,
dstSize: 3,
srcSize: minBufSize,
t: Title(language.Afrikaans),
}}
}
func TestShortBuffersAndOverflow(t *testing.T) {
for i, tt := range bufferTests {
testtext.Run(t, tt.desc, func(t *testing.T) {
buf := make([]byte, tt.dstSize)
got := []byte{}
var nSrc, nDst int
var err error
for p := 0; p < len(tt.src); p += nSrc {
q := p + tt.srcSize
if q > len(tt.src) {
q = len(tt.src)
}
nDst, nSrc, err = tt.t.Transform(buf, []byte(tt.src[p:q]), q == len(tt.src))
got = append(got, buf[:nDst]...)
if p == 0 && err != tt.firstErr {
t.Errorf("%d:%s:\n error was %v; want %v", i, tt.desc, err, tt.firstErr)
break
}
}
if string(got) != tt.want {
t.Errorf("%d:%s:\ngot %+q;\nwant %+q", i, tt.desc, got, tt.want)
}
testHandover(t, Caser{tt.t}, tt.src)
})
}
}
func TestSpan(t *testing.T) {
for _, tt := range []struct {
desc string
src string
want string
atEOF bool
err error
t Caser
}{{
desc: "und/upper/basic",
src: "abcdefg",
want: "",
atEOF: true,
err: transform.ErrEndOfSpan,
t: Upper(language.Und),
}, {
desc: "und/upper/short src",
src: "123É"[:4],
want: "123",
atEOF: false,
err: transform.ErrShortSrc,
t: Upper(language.Und),
}, {
desc: "und/upper/no error on short",
src: "12",
want: "12",
atEOF: false,
t: Upper(language.Und),
}, {
desc: "und/lower/basic",
src: "ABCDEFG",
want: "",
atEOF: true,
err: transform.ErrEndOfSpan,
t: Lower(language.Und),
}, {
desc: "und/lower/short src num",
src: "123é"[:4],
want: "123",
atEOF: false,
err: transform.ErrShortSrc,
t: Lower(language.Und),
}, {
desc: "und/lower/short src greek",
src: "αβγé"[:7],
want: "αβγ",
atEOF: false,
err: transform.ErrShortSrc,
t: Lower(language.Und),
}, {
desc: "und/lower/no error on short",
src: "12",
want: "12",
atEOF: false,
t: Lower(language.Und),
}, {
desc: "und/lower/simple (no final sigma)",
src: "ος οσσ",
want: "οσ οσσ",
atEOF: true,
t: Lower(language.Und, HandleFinalSigma(false)),
}, {
desc: "und/title/simple (no final sigma)",
src: "Οσ Οσσ",
want: "Οσ Οσσ",
atEOF: true,
t: Title(language.Und, HandleFinalSigma(false)),
}, {
desc: "und/lower/final sigma: no error",
src: "οΣ", // Oς
want: "ο", // Oς
err: transform.ErrEndOfSpan,
t: Lower(language.Und),
}, {
desc: "und/title/final sigma: no error",
src: "ΟΣ", // Oς
want: "Ο", // Oς
err: transform.ErrEndOfSpan,
t: Title(language.Und),
}, {
desc: "und/title/final sigma: no short source!",
src: "ΟσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσΣ",
want: "Οσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσσ",
err: transform.ErrEndOfSpan,
t: Title(language.Und),
}, {
desc: "und/title/clipped UTF-8 rune",
src: "Σσ" + string([]byte{0xCF}),
want: "Σσ",
atEOF: false,
err: transform.ErrShortSrc,
t: Title(language.Und),
}, {
desc: "und/title/clipped UTF-8 rune atEOF",
src: "Σσσ" + string([]byte{0xCF}),
want: "Σσσ" + string([]byte{0xCF}),
atEOF: true,
t: Title(language.Und),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/long string",
src: "A" + strings.Repeat("a", maxIgnorable+5),
want: "A" + strings.Repeat("a", maxIgnorable+5),
t: Title(language.Und),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/cyrillic",
src: "При",
want: "При",
atEOF: true,
t: Title(language.Und, HandleFinalSigma(false)),
}, {
// Note: the choice to change the final sigma at the end in case of
// too many case ignorables is arbitrary. The main reason for this
// choice is that it results in simpler code.
desc: "und/title/final sigma: max ignorables",
src: "Οσ" + strings.Repeat(".", maxIgnorable) + "A",
want: "Οσ" + strings.Repeat(".", maxIgnorable) + "A",
t: Title(language.Und),
}, {
desc: "el/upper/max ignorables - not implemented",
src: "Ο" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0313",
want: "",
err: transform.ErrEndOfSpan,
t: Upper(language.Greek),
}, {
desc: "el/upper/too many ignorables - not implemented",
src: "Ο" + strings.Repeat("\u0321", maxIgnorable) + "\u0313",
want: "",
err: transform.ErrEndOfSpan,
t: Upper(language.Greek),
}, {
desc: "el/upper/short dst",
src: "123ο",
want: "",
err: transform.ErrEndOfSpan,
t: Upper(language.Greek),
}, {
desc: "lt/lower/max ignorables",
src: "i" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0307\u0300",
want: "i" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0307\u0300",
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/isLower",
src: "I" + strings.Repeat("\u0321", maxIgnorable) + "\u0300",
want: "",
err: transform.ErrEndOfSpan,
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/not identical",
src: "aaaaa\u00cc", // U+00CC LATIN CAPITAL LETTER I GRAVE
err: transform.ErrEndOfSpan,
want: "aaaaa",
t: Lower(language.Lithuanian),
}, {
desc: "lt/lower/identical",
src: "aaaai\u0307\u0300", // U+00CC LATIN CAPITAL LETTER I GRAVE
want: "aaaai\u0307\u0300",
t: Lower(language.Lithuanian),
}, {
desc: "lt/upper/not implemented",
src: "I" + strings.Repeat("\u0321", maxIgnorable-1) + "\u0300",
want: "",
err: transform.ErrEndOfSpan,
t: Upper(language.Lithuanian),
}, {
desc: "lt/upper/not implemented, ascii",
src: "AB",
want: "",
err: transform.ErrEndOfSpan,
t: Upper(language.Lithuanian),
}, {
desc: "nl/title/pre-IJ cutoff",
src: " IJ",
want: " IJ",
t: Title(language.Dutch),
}, {
desc: "nl/title/mid-IJ cutoff",
src: " Ia",
want: " Ia",
t: Title(language.Dutch),
}, {
desc: "af/title/apostrophe",
src: "'n Bietje",
want: "'n Bietje",
t: Title(language.Afrikaans),
}, {
desc: "af/title/apostrophe-incorrect",
src: "'N Bietje",
// The Single_Quote (a MidWord) needs to be retained as unspanned so
// that a successive call to Transform can detect that N should not be
// capitalized.
want: "",
err: transform.ErrEndOfSpan,
t: Title(language.Afrikaans),
}} {
testtext.Run(t, tt.desc, func(t *testing.T) {
for p := 0; p < len(tt.want); p += utf8.RuneLen([]rune(tt.src[p:])[0]) {
tt.t.Reset()
n, err := tt.t.Span([]byte(tt.src[:p]), false)
if err != nil && err != transform.ErrShortSrc {
t.Errorf("early failure:Span(%+q): %v (%d < %d)", tt.src[:p], err, n, len(tt.want))
break
}
}
tt.t.Reset()
n, err := tt.t.Span([]byte(tt.src), tt.atEOF)
if n != len(tt.want) || err != tt.err {
t.Errorf("Span(%+q, %v): got %d, %v; want %d, %v", tt.src, tt.atEOF, n, err, len(tt.want), tt.err)
}
testHandover(t, tt.t, tt.src)
})
}
}
var txtASCII = strings.Repeat("The quick brown fox jumps over the lazy dog. ", 50)
// Taken from http://creativecommons.org/licenses/by-sa/3.0/vn/
const txt_vn = `Với các điều kiện sau: Ghi nhận công của tác giả. Nếu bạn sử
dụng, chuyển đổi, hoặc xây dựng dự án từ nội dung được chia sẻ này, bạn phải áp
dụng giấy phép này hoặc một giấy phép khác các điều khoản tương tự như giấy
phép này cho dự án của bạn. Hiểu rằng: Miễn Bất kỳ các điều kiện nào trên đây
cũng thể được miễn bỏ nếu bạn được sự cho phép của người sở hữu bản quyền.
Phạm vi công chúng Khi tác phẩm hoặc bất kỳ chương nào của tác phẩm đã trong
vùng dành cho công chúng theo quy định của pháp luật thì tình trạng của không
bị ảnh hưởng bởi giấy phép trong bất kỳ trường hợp nào.`
// http://creativecommons.org/licenses/by-sa/2.5/cn/
const txt_cn = `您可以自由 复制发行展览表演放映
广播或通过信息网络传播本作品 创作演绎作品
对本作品进行商业性使用 惟须遵守下列条件
署名 您必须按照作者或者许可人指定的方式对作品进行署名
相同方式共享 如果您改变转换本作品或者以本作品为基础进行创作
您只能采用与本协议相同的许可协议发布基于本作品的演绎作品`
// Taken from http://creativecommons.org/licenses/by-sa/1.0/deed.ru
const txt_ru = `При обязательном соблюдении следующих условий: Attribution Вы
должны атрибутировать произведение (указывать автора и источник) в порядке,
предусмотренном автором или лицензиаром (но только так, чтобы никоим образом не
подразумевалось, что они поддерживают вас или использование вами данного
произведения). Υπό τις ακόλουθες προϋποθέσεις:`
// Taken from http://creativecommons.org/licenses/by-sa/3.0/gr/
const txt_gr = `Αναφορά Δημιουργού Θα πρέπει να κάνετε την αναφορά στο έργο με
τον τρόπο που έχει οριστεί από το δημιουργό ή το χορηγούντο την άδεια (χωρίς
όμως να εννοείται με οποιονδήποτε τρόπο ότι εγκρίνουν εσάς ή τη χρήση του έργου
από εσάς). Παρόμοια Διανομή Εάν αλλοιώσετε, τροποποιήσετε ή δημιουργήσετε
περαιτέρω βασισμένοι στο έργο θα μπορείτε να διανέμετε το έργο που θα προκύψει
μόνο με την ίδια ή παρόμοια άδεια.`
const txtNonASCII = txt_vn + txt_cn + txt_ru + txt_gr
// TODO: Improve ASCII performance.
func BenchmarkCasers(b *testing.B) {
for _, s := range []struct{ name, text string }{
{"ascii", txtASCII},
{"nonASCII", txtNonASCII},
{"short", "При"},
} {
src := []byte(s.text)
// Measure case mappings in bytes package for comparison.
for _, f := range []struct {
name string
fn func(b []byte) []byte
}{
{"lower", bytes.ToLower},
{"title", bytes.ToTitle},
{"upper", bytes.ToUpper},
} {
testtext.Bench(b, path.Join(s.name, "bytes", f.name), func(b *testing.B) {
b.SetBytes(int64(len(src)))
for i := 0; i < b.N; i++ {
f.fn(src)
}
})
}
for _, t := range []struct {
name string
caser transform.SpanningTransformer
}{
{"fold/default", Fold()},
{"upper/default", Upper(language.Und)},
{"lower/sigma", Lower(language.Und)},
{"lower/simple", Lower(language.Und, HandleFinalSigma(false))},
{"title/sigma", Title(language.Und)},
{"title/simple", Title(language.Und, HandleFinalSigma(false))},
} {
c := Caser{t.caser}
dst := make([]byte, len(src))
testtext.Bench(b, path.Join(s.name, t.name, "transform"), func(b *testing.B) {
b.SetBytes(int64(len(src)))
for i := 0; i < b.N; i++ {
c.Reset()
c.Transform(dst, src, true)
}
})
// No need to check span for simple cases, as they will be the same
// as sigma.
if strings.HasSuffix(t.name, "/simple") {
continue
}
spanSrc := c.Bytes(src)
testtext.Bench(b, path.Join(s.name, t.name, "span"), func(b *testing.B) {
c.Reset()
if n, _ := c.Span(spanSrc, true); n < len(spanSrc) {
b.Fatalf("spanner is not recognizing text %q as done (at %d)", spanSrc, n)
}
b.SetBytes(int64(len(spanSrc)))
for i := 0; i < b.N; i++ {
c.Reset()
c.Span(spanSrc, true)
}
})
}
}
}

2253
vendor/golang.org/x/text/cases/tables10.0.0.go generated vendored Normal file

File diff suppressed because it is too large

1160
vendor/golang.org/x/text/cases/tables10.0.0_test.go generated vendored Normal file

File diff suppressed because it is too large

2213
vendor/golang.org/x/text/cases/tables9.0.0.go generated vendored Normal file

File diff suppressed because it is too large

1156
vendor/golang.org/x/text/cases/tables9.0.0_test.go generated vendored Normal file

File diff suppressed because it is too large

215
vendor/golang.org/x/text/cases/trieval.go generated vendored Normal file
View file

@ -0,0 +1,215 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package cases
// This file contains definitions for interpreting the trie value of the case
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.
// info holds case information for a single rune. It is the value returned
// by a trie lookup. Most mapping information can be stored in a single 16-bit
// value. If not, for example when a rune is mapped to multiple runes, the value
// stores some basic case data and an index into an array with additional data.
//
// The per-rune values have the following format:
//
// if (exception) {
// 15..5 unsigned exception index
// 4 unused
// } else {
// 15..8 XOR pattern or index to XOR pattern for case mapping
// Only 13..8 are used for XOR patterns.
// 7 inverseFold (fold to upper, not to lower)
// 6 index: interpret the XOR pattern as an index
// or isMid if case mode is cIgnorableUncased.
// 5..4 CCC: zero (normal or break), above or other
// }
// 3 exception: interpret this value as an exception index
// (TODO: is this bit necessary? Probably implied from case mode.)
// 2..0 case mode
//
// For the non-exceptional cases, a rune must be either uncased, lowercase or
// uppercase. If the rune is cased, the XOR pattern maps either a lowercase
// rune to uppercase or an uppercase rune to lowercase (applied to the 10
// least-significant bits of the rune).
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
const (
casedMask = 0x0003
fullCasedMask = 0x0007
ignorableMask = 0x0006
ignorableValue = 0x0004
inverseFoldBit = 1 << 7
isMidBit = 1 << 6
exceptionBit = 1 << 3
exceptionShift = 5
numExceptionBits = 11
xorIndexBit = 1 << 6
xorShift = 8
// There is no mapping if all xor bits and the exception bit are zero.
hasMappingMask = 0xff80 | exceptionBit
)
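
// Illustrative sketch (not part of the generated file): how the masks above
// carve up a trie value. The helper name decodeInfoSketch is hypothetical.
func decodeInfoSketch(v info) (mode info, isException bool, payload uint16) {
	mode = v & fullCasedMask // bits 2..0: case mode
	if v&exceptionBit != 0 {
		// Bits 15..5 hold an index into the exceptions block.
		return mode, true, uint16(v) >> exceptionShift
	}
	// Bits 15..8 hold the XOR pattern (or, with xorIndexBit set, an index to one).
	return mode, false, uint16(v) >> xorShift
}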
// The case mode bits encode the case type of a rune. This includes uncased,
// title, upper and lower case and case ignorable. (For a definition of these
// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare
// cases, a rune can be both cased and case-ignorable. This is encoded by
// cIgnorableCased. A rune of this type is always lower case. Some runes are
// cased while not having a mapping.
//
// A common pattern for scripts in the Unicode standard is for upper and lower
// case runes to alternate for increasing rune values (e.g. the accented Latin
// ranges starting from U+0100 and U+1E00 among others and some Cyrillic
// characters). We use this property by defining a cXORCase mode, where the case
// mode (always upper or lower case) is derived from the rune value. As the XOR
// pattern for case mappings is often identical for successive runes, using
// cXORCase can result in large series of identical trie values. This, in turn,
// allows us to better compress the trie blocks.
const (
cUncased info = iota // 000
cTitle // 001
cLower // 010
cUpper // 011
cIgnorableUncased // 100
cIgnorableCased // 101 // lower case if mappings exist
cXORCase // 11x // case is cLower | ((rune&1) ^ x)
maxCaseMode = cUpper
)
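
// Illustrative worked example (hypothetical helper, not in the original file):
// for the alternating Latin range starting at U+0100, 'Ā' (U+0100) and
// 'ā' (U+0101) differ only in their lowest bit, so a stored XOR pattern of 1
// flips between the two forms, which is what cXORCase relies on.
func xorCaseSketch(r, pattern rune) rune {
	// The pattern is applied to the 10 least-significant bits of the rune.
	return r ^ (pattern & 0x3ff)
}

// xorCaseSketch('Ā', 1) == 'ā' and xorCaseSketch('ā', 1) == 'Ā'.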
func (c info) isCased() bool {
return c&casedMask != 0
}
func (c info) isCaseIgnorable() bool {
return c&ignorableMask == ignorableValue
}
func (c info) isNotCasedAndNotCaseIgnorable() bool {
return c&fullCasedMask == 0
}
func (c info) isCaseIgnorableAndNotCased() bool {
return c&fullCasedMask == cIgnorableUncased
}
func (c info) isMid() bool {
return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased
}
// The case mapping implementation will need to know about various Canonical
// Combining Class (CCC) values. We encode two of these in the trie value:
// cccZero (0) and cccAbove (230). If the value is cccOther, it means that
// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that
// the rune also has the break category Break (see below).
const (
cccBreak info = iota << 4
cccZero
cccAbove
cccOther
cccMask = cccBreak | cccZero | cccAbove | cccOther
)
const (
starter = 0
above = 230
iotaSubscript = 240
)
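
// Illustrative sketch (hypothetical helper): mapping the 2-bit CCC category
// stored in bits 5..4 back to a nominal combining class. The exact class for
// cccOther is not recorded in the trie; the 1 below is only a stand-in for
// "non-zero, not 230".
func nominalCCCSketch(v info) uint8 {
	switch v & cccMask {
	case cccAbove:
		return above // 230
	case cccOther:
		return 1 // some CCC > 0 other than 230 (placeholder)
	default: // cccZero or cccBreak
		return starter // 0
	}
}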
// The exceptions slice holds data that does not fit in a normal info entry.
// Each entry is pointed to by the exception index of an info value. It has the
// following format:
//
// Header
// byte 0:
// 7..6 unused
// 5..4 CCC type (same bits as entry)
// 3 unused
// 2..0 length of fold
//
// byte 1:
// 7..6 unused
// 5..3 length of 1st mapping of case type
// 2..0 length of 2nd mapping of case type
//
// case 1st 2nd
// lower -> upper, title
// upper -> lower, title
// title -> lower, upper
//
// Lengths with the value 0x7 indicate no value and imply no change.
// A length of 0 indicates a mapping to a zero-length string.
//
// Body bytes:
// case folding bytes
// lowercase mapping bytes
// uppercase mapping bytes
// titlecase mapping bytes
// closure mapping bytes (for NFKC_Casefold). (TODO)
//
// Fallbacks:
// missing fold -> lower
// missing title -> upper
// all missing -> original rune
//
// exceptions starts with a dummy byte to enforce that there is no zero index
// value.
const (
lengthMask = 0x07
lengthBits = 3
noChange = 0
)
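
// Illustrative sketch (hypothetical helper): reading the two header bytes of
// an exceptions entry as laid out above. A length of 0x7 means "no value".
func exceptionLengthsSketch(e []byte) (nFold, nFirst, nSecond int) {
	nFold = int(e[0] & lengthMask)                  // byte 0, bits 2..0
	nFirst = int((e[1] >> lengthBits) & lengthMask) // byte 1, bits 5..3
	nSecond = int(e[1] & lengthMask)                // byte 1, bits 2..0
	return nFold, nFirst, nSecond
}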
// References to generated trie.
var trie = newCaseTrie(0)
var sparse = sparseBlocks{
values: sparseValues[:],
offsets: sparseOffsets[:],
}
// Sparse block lookup code.
// valueRange is an entry in a sparse block.
type valueRange struct {
value uint16
lo, hi byte
}
type sparseBlocks struct {
values []valueRange
offsets []uint16
}
// lookup returns the value from values block n for byte b using binary search.
func (s *sparseBlocks) lookup(n uint32, b byte) uint16 {
lo := s.offsets[n]
hi := s.offsets[n+1]
for lo < hi {
m := lo + (hi-lo)/2
r := s.values[m]
if r.lo <= b && b <= r.hi {
return r.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}
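
// Illustrative usage sketch with made-up values (not part of the generated
// tables): a block whose single range covers bytes 0x80..0xbf. Bytes outside
// every range fall through to 0, which decodes as an uncased rune without a
// mapping.
func sparseLookupSketch() uint16 {
	s := sparseBlocks{
		values:  []valueRange{{value: 0x1234, lo: 0x80, hi: 0xbf}},
		offsets: []uint16{0, 1},
	}
	return s.lookup(0, 0x90) // 0x1234; s.lookup(0, 0x40) would return 0.
}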
// lastRuneForTesting is the last rune used for testing. Everything after this
// is boring.
const lastRuneForTesting = rune(0x1FFFF)