Initial commit: Go 1.23 release state
140
src/text/scanner/example_test.go
Normal file
@@ -0,0 +1,140 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner_test

import (
	"fmt"
	"strings"
	"text/scanner"
	"unicode"
)

func Example() {
	const src = `
// This is scanned code.
if a > 10 {
	someParsable = text
}`

	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "example"
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s: %s\n", s.Position, s.TokenText())
	}

	// Output:
	// example:3:1: if
	// example:3:4: a
	// example:3:6: >
	// example:3:8: 10
	// example:3:11: {
	// example:4:2: someParsable
	// example:4:15: =
	// example:4:17: text
	// example:5:1: }
}

func Example_isIdentRune() {
	const src = "%var1 var2%"

	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "default"

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s: %s\n", s.Position, s.TokenText())
	}

	fmt.Println()
	s.Init(strings.NewReader(src))
	s.Filename = "percent"

	// treat leading '%' as part of an identifier
	s.IsIdentRune = func(ch rune, i int) bool {
		return ch == '%' && i == 0 || unicode.IsLetter(ch) || unicode.IsDigit(ch) && i > 0
	}

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s: %s\n", s.Position, s.TokenText())
	}

	// Output:
	// default:1:1: %
	// default:1:2: var1
	// default:1:7: var2
	// default:1:11: %
	//
	// percent:1:1: %var1
	// percent:1:7: var2
	// percent:1:11: %
}

func Example_mode() {
	const src = `
    // Comment begins at column 5.

This line should not be included in the output.

/*
This multiline comment
should be extracted in
its entirety.
*/
`

	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "comments"
	s.Mode ^= scanner.SkipComments // don't skip comments

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		txt := s.TokenText()
		if strings.HasPrefix(txt, "//") || strings.HasPrefix(txt, "/*") {
			fmt.Printf("%s: %s\n", s.Position, txt)
		}
	}

	// Output:
	// comments:2:5: // Comment begins at column 5.
	// comments:6:1: /*
	// This multiline comment
	// should be extracted in
	// its entirety.
	// */
}

func Example_whitespace() {
	// tab-separated values
	const src = `aa	ab	ac	ad
ba	bb	bc	bd
ca	cb	cc	cd
da	db	dc	dd`

	var (
		col, row int
		s        scanner.Scanner
		tsv      [4][4]string // large enough for example above
	)
	s.Init(strings.NewReader(src))
	s.Whitespace ^= 1<<'\t' | 1<<'\n' // don't skip tabs and new lines

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		switch tok {
		case '\n':
			row++
			col = 0
		case '\t':
			col++
		default:
			tsv[row][col] = s.TokenText()
		}
	}

	fmt.Print(tsv)

	// Output:
	// [[aa ab ac ad] [ba bb bc bd] [ca cb cc cd] [da db dc dd]]
}
792
src/text/scanner/scanner.go
Normal file
@@ -0,0 +1,792 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
// It takes an io.Reader providing the source, which then can be tokenized
// through repeated calls to the Scan function. For compatibility with
// existing tools, the NUL character is not allowed. If the first character
// in the source is a UTF-8 encoded byte order mark (BOM), it is discarded.
//
// By default, a [Scanner] skips white space and Go comments and recognizes all
// literals as defined by the Go language specification. It may be
// customized to recognize only a subset of those literals and to recognize
// different identifier and white space characters.
package scanner

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"unicode"
	"unicode/utf8"
)

// Position is a value that represents a source position.
// A position is valid if Line > 0.
type Position struct {
	Filename string // filename, if any
	Offset   int    // byte offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count per line)
}

// IsValid reports whether the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }

func (pos Position) String() string {
	s := pos.Filename
	if s == "" {
		s = "<input>"
	}
	if pos.IsValid() {
		s += fmt.Sprintf(":%d:%d", pos.Line, pos.Column)
	}
	return s
}

// Predefined mode bits to control recognition of tokens. For instance,
// to configure a [Scanner] such that it only recognizes (Go) identifiers,
// integers, and skips comments, set the Scanner's Mode field to:
//
//	ScanIdents | ScanInts | SkipComments
//
// With the exceptions of comments, which are skipped if SkipComments is
// set, unrecognized tokens are not ignored. Instead, the scanner simply
// returns the respective individual characters (or possibly sub-tokens).
// For instance, if the mode is ScanIdents (not ScanStrings), the string
// "foo" is scanned as the token sequence '"' [Ident] '"'.
//
// Use GoTokens to configure the Scanner such that it accepts all Go
// literal tokens including Go identifiers. Comments will be skipped.
const (
	ScanIdents     = 1 << -Ident
	ScanInts       = 1 << -Int
	ScanFloats     = 1 << -Float // includes Ints and hexadecimal floats
	ScanChars      = 1 << -Char
	ScanStrings    = 1 << -String
	ScanRawStrings = 1 << -RawString
	ScanComments   = 1 << -Comment
	SkipComments   = 1 << -skipComment // if set with ScanComments, comments become white space
	GoTokens       = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
)
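
// Editor's note: a minimal sketch of the mode configuration described above,
// written as client code; it is not part of this file and assumes the calling
// package imports "fmt", "strings", and "text/scanner". With only ScanIdents
// and ScanInts set, string literals are not recognized as a whole, so "foo"
// arrives as the three tokens '"', Ident, '"':
//
//	var s scanner.Scanner
//	s.Init(strings.NewReader(`count = 10 and name = "foo"`))
//	s.Mode = scanner.ScanIdents | scanner.ScanInts
//	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
//		fmt.Printf("%s %q\n", scanner.TokenString(tok), s.TokenText())
//	}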

// The result of Scan is one of these tokens or a Unicode character.
const (
	EOF = -(iota + 1)
	Ident
	Int
	Float
	Char
	String
	RawString
	Comment

	// internal use only
	skipComment
)

var tokenString = map[rune]string{
	EOF:       "EOF",
	Ident:     "Ident",
	Int:       "Int",
	Float:     "Float",
	Char:      "Char",
	String:    "String",
	RawString: "RawString",
	Comment:   "Comment",
}

// TokenString returns a printable string for a token or Unicode character.
func TokenString(tok rune) string {
	if s, found := tokenString[tok]; found {
		return s
	}
	return fmt.Sprintf("%q", string(tok))
}

// GoWhitespace is the default value for the [Scanner]'s Whitespace field.
// Its value selects Go's white space characters.
const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
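
// Editor's note (sketch, not part of the original file): Whitespace is a plain
// 64-bit set indexed by character value, which is why GoWhitespace above is a
// bitwise OR of shifted bits. For example, to treat only blanks and tabs as
// white space and have newlines delivered as tokens (assumes the client
// imports "fmt", "strings", and "text/scanner"):
//
//	var s scanner.Scanner
//	s.Init(strings.NewReader("a b\nc"))
//	s.Whitespace = 1<<'\t' | 1<<' ' // '\n' and '\r' stay significant
//	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
//		fmt.Printf("%q ", s.TokenText())
//	}
//	// should print: "a" "b" "\n" "c"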

const bufLen = 1024 // at least utf8.UTFMax

// A Scanner implements reading of Unicode characters and tokens from an [io.Reader].
type Scanner struct {
	// Input
	src io.Reader

	// Source buffer
	srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
	srcPos int              // reading position (srcBuf index)
	srcEnd int              // source end (srcBuf index)

	// Source position
	srcBufOffset int // byte offset of srcBuf[0] in source
	line         int // line count
	column       int // character count
	lastLineLen  int // length of last line in characters (for correct column reporting)
	lastCharLen  int // length of last character in bytes

	// Token text buffer
	// Typically, token text is stored completely in srcBuf, but in general
	// the token text's head may be buffered in tokBuf while the token text's
	// tail is stored in srcBuf.
	tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
	tokPos int          // token text tail position (srcBuf index); valid if >= 0
	tokEnd int          // token text tail end (srcBuf index)

	// One character look-ahead
	ch rune // character before current srcPos

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(s *Scanner, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// The Mode field controls which tokens are recognized. For instance,
	// to recognize Ints, set the ScanInts bit in Mode. The field may be
	// changed at any time.
	Mode uint

	// The Whitespace field controls which characters are recognized
	// as white space. To recognize a character ch <= ' ' as white space,
	// set the ch'th bit in Whitespace (the Scanner's behavior is undefined
	// for values ch > ' '). The field may be changed at any time.
	Whitespace uint64

	// IsIdentRune is a predicate controlling the characters accepted
	// as the ith rune in an identifier. The set of valid characters
	// must not intersect with the set of white space characters.
	// If no IsIdentRune function is set, regular Go identifiers are
	// accepted instead. The field may be changed at any time.
	IsIdentRune func(ch rune, i int) bool

	// Start position of most recently scanned token; set by Scan.
	// Calling Init or Next invalidates the position (Line == 0).
	// The Filename field is always left untouched by the Scanner.
	// If an error is reported (via Error) and Position is invalid,
	// the scanner is not inside a token. Call Pos to obtain an error
	// position in that case, or to obtain the position immediately
	// after the most recently scanned token.
	Position
}
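
// Editor's note: an illustrative sketch of wiring the fields above together;
// it is not part of the original file, and the kebab-case predicate is purely
// hypothetical (client code would import "fmt", "strings", "text/scanner",
// and "unicode"):
//
//	var s scanner.Scanner
//	s.Init(strings.NewReader("front-matter rest"))
//	s.IsIdentRune = func(ch rune, i int) bool {
//		// also accept '-' after the first rune
//		return ch == '_' || unicode.IsLetter(ch) ||
//			(i > 0 && (unicode.IsDigit(ch) || ch == '-'))
//	}
//	tok := s.Scan()
//	fmt.Println(scanner.TokenString(tok), s.TokenText()) // Ident front-matter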

// Init initializes a [Scanner] with a new source and returns s.
// [Scanner.Error] is set to nil, [Scanner.ErrorCount] is set to 0, [Scanner.Mode] is set to [GoTokens],
// and [Scanner.Whitespace] is set to [GoWhitespace].
func (s *Scanner) Init(src io.Reader) *Scanner {
	s.src = src

	// initialize source buffer
	// (the first call to next() will fill it by calling src.Read)
	s.srcBuf[0] = utf8.RuneSelf // sentinel
	s.srcPos = 0
	s.srcEnd = 0

	// initialize source position
	s.srcBufOffset = 0
	s.line = 1
	s.column = 0
	s.lastLineLen = 0
	s.lastCharLen = 0

	// initialize token text buffer
	// (required for first call to next()).
	s.tokPos = -1

	// initialize one character look-ahead
	s.ch = -2 // no char read yet, not EOF

	// initialize public fields
	s.Error = nil
	s.ErrorCount = 0
	s.Mode = GoTokens
	s.Whitespace = GoWhitespace
	s.Line = 0 // invalidate token position

	return s
}
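
// Editor's note: a small usage sketch (not part of the original file). Because
// Init resets Error to nil, a custom error handler must be installed after the
// call to Init; here the handler collects messages instead of letting them go
// to os.Stderr (assumes the client imports "fmt", "strings", "text/scanner"):
//
//	var errs []string
//	var s scanner.Scanner
//	s.Init(strings.NewReader(`"unterminated`))
//	s.Error = func(s *scanner.Scanner, msg string) {
//		errs = append(errs, fmt.Sprintf("%s: %s", s.Pos(), msg))
//	}
//	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
//	}
//	// s.ErrorCount == 1; errs holds a "literal not terminated" message.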
|
||||
|
||||
// next reads and returns the next Unicode character. It is designed such
|
||||
// that only a minimal amount of work needs to be done in the common ASCII
|
||||
// case (one test to check for both ASCII and end-of-buffer, and one test
|
||||
// to check for newlines).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, width := rune(s.srcBuf[s.srcPos]), 1
|
||||
|
||||
if ch >= utf8.RuneSelf {
|
||||
// uncommon case: not ASCII or not enough bytes
|
||||
for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
|
||||
// not enough bytes: read some more, but first
|
||||
// save away token text if any
|
||||
if s.tokPos >= 0 {
|
||||
s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
|
||||
s.tokPos = 0
|
||||
// s.tokEnd is set by Scan()
|
||||
}
|
||||
// move unread bytes to beginning of buffer
|
||||
copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
|
||||
s.srcBufOffset += s.srcPos
|
||||
// read more bytes
|
||||
// (an io.Reader must return io.EOF when it reaches
|
||||
// the end of what it is reading - simply returning
|
||||
// n == 0 will make this loop retry forever; but the
|
||||
// error is in the reader implementation in that case)
|
||||
i := s.srcEnd - s.srcPos
|
||||
n, err := s.src.Read(s.srcBuf[i:bufLen])
|
||||
s.srcPos = 0
|
||||
s.srcEnd = i + n
|
||||
s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
s.error(err.Error())
|
||||
}
|
||||
if s.srcEnd == 0 {
|
||||
if s.lastCharLen > 0 {
|
||||
// previous character was not EOF
|
||||
s.column++
|
||||
}
|
||||
s.lastCharLen = 0
|
||||
return EOF
|
||||
}
|
||||
// If err == EOF, we won't be getting more
|
||||
// bytes; break to avoid infinite loop. If
|
||||
// err is something else, we don't know if
|
||||
// we can get more bytes; thus also break.
|
||||
break
|
||||
}
|
||||
}
|
||||
// at least one byte
|
||||
ch = rune(s.srcBuf[s.srcPos])
|
||||
if ch >= utf8.RuneSelf {
|
||||
// uncommon case: not ASCII
|
||||
ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
|
||||
if ch == utf8.RuneError && width == 1 {
|
||||
// advance for correct error position
|
||||
s.srcPos += width
|
||||
s.lastCharLen = width
|
||||
s.column++
|
||||
s.error("invalid UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// advance
|
||||
s.srcPos += width
|
||||
s.lastCharLen = width
|
||||
s.column++
|
||||
|
||||
// special situations
|
||||
switch ch {
|
||||
case 0:
|
||||
// for compatibility with other tools
|
||||
s.error("invalid character NUL")
|
||||
case '\n':
|
||||
s.line++
|
||||
s.lastLineLen = s.column
|
||||
s.column = 0
|
||||
}
|
||||
|
||||
return ch
|
||||
}

// Next reads and returns the next Unicode character.
// It returns [EOF] at the end of the source. It reports
// a read error by calling s.Error, if not nil; otherwise
// it prints an error message to [os.Stderr]. Next does not
// update the [Scanner.Position] field; use [Scanner.Pos]() to
// get the current position.
func (s *Scanner) Next() rune {
	s.tokPos = -1 // don't collect token text
	s.Line = 0    // invalidate token position
	ch := s.Peek()
	if ch != EOF {
		s.ch = s.next()
	}
	return ch
}

// Peek returns the next Unicode character in the source without advancing
// the scanner. It returns [EOF] if the scanner's position is at the last
// character of the source.
func (s *Scanner) Peek() rune {
	if s.ch == -2 {
		// this code is only run for the very first character
		s.ch = s.next()
		if s.ch == '\uFEFF' {
			s.ch = s.next() // ignore BOM
		}
	}
	return s.ch
}
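
// Editor's note: Next and Peek give character-level access, in contrast to the
// token-level Scan. A brief sketch, not part of the original file (assumes the
// client imports "fmt", "strings", "text/scanner"):
//
//	var s scanner.Scanner
//	s.Init(strings.NewReader("ab"))
//	fmt.Println(string(s.Peek()))        // a (nothing consumed yet)
//	fmt.Println(string(s.Next()))        // a
//	fmt.Println(string(s.Next()))        // b
//	fmt.Println(s.Next() == scanner.EOF) // true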
|
||||
|
||||
func (s *Scanner) error(msg string) {
|
||||
s.tokEnd = s.srcPos - s.lastCharLen // make sure token text is terminated
|
||||
s.ErrorCount++
|
||||
if s.Error != nil {
|
||||
s.Error(s, msg)
|
||||
return
|
||||
}
|
||||
pos := s.Position
|
||||
if !pos.IsValid() {
|
||||
pos = s.Pos()
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
func (s *Scanner) errorf(format string, args ...any) {
|
||||
s.error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (s *Scanner) isIdentRune(ch rune, i int) bool {
|
||||
if s.IsIdentRune != nil {
|
||||
return ch != EOF && s.IsIdentRune(ch, i)
|
||||
}
|
||||
return ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) && i > 0
|
||||
}
|
||||
|
||||
func (s *Scanner) scanIdentifier() rune {
|
||||
// we know the zero'th rune is OK; start scanning at the next one
|
||||
ch := s.next()
|
||||
for i := 1; s.isIdentRune(ch, i); i++ {
|
||||
ch = s.next()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
|
||||
func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
|
||||
func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
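
// Editor's note (not part of the original file): lower works because setting
// bit 0x20 ('a'-'A') maps the ASCII upper-case letters onto their lower-case
// counterparts while leaving the decimal digits unchanged, e.g.
//
//	lower('G') == 'g'
//	lower('7') == '7'
//
// For other characters the result is meaningless on its own, which is why it
// is only used together with the range checks in isHex and scanNumber.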
|
||||
|
||||
// digits accepts the sequence { digit | '_' } starting with ch0.
|
||||
// If base <= 10, digits accepts any decimal digit but records
|
||||
// the first invalid digit >= base in *invalid if *invalid == 0.
|
||||
// digits returns the first rune that is not part of the sequence
|
||||
// anymore, and a bitset describing whether the sequence contained
|
||||
// digits (bit 0 is set), or separators '_' (bit 1 is set).
|
||||
func (s *Scanner) digits(ch0 rune, base int, invalid *rune) (ch rune, digsep int) {
|
||||
ch = ch0
|
||||
if base <= 10 {
|
||||
max := rune('0' + base)
|
||||
for isDecimal(ch) || ch == '_' {
|
||||
ds := 1
|
||||
if ch == '_' {
|
||||
ds = 2
|
||||
} else if ch >= max && *invalid == 0 {
|
||||
*invalid = ch
|
||||
}
|
||||
digsep |= ds
|
||||
ch = s.next()
|
||||
}
|
||||
} else {
|
||||
for isHex(ch) || ch == '_' {
|
||||
ds := 1
|
||||
if ch == '_' {
|
||||
ds = 2
|
||||
}
|
||||
digsep |= ds
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Scanner) scanNumber(ch rune, seenDot bool) (rune, rune) {
|
||||
base := 10 // number base
|
||||
prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
|
||||
digsep := 0 // bit 0: digit present, bit 1: '_' present
|
||||
invalid := rune(0) // invalid digit in literal, or 0
|
||||
|
||||
// integer part
|
||||
var tok rune
|
||||
var ds int
|
||||
if !seenDot {
|
||||
tok = Int
|
||||
if ch == '0' {
|
||||
ch = s.next()
|
||||
switch lower(ch) {
|
||||
case 'x':
|
||||
ch = s.next()
|
||||
base, prefix = 16, 'x'
|
||||
case 'o':
|
||||
ch = s.next()
|
||||
base, prefix = 8, 'o'
|
||||
case 'b':
|
||||
ch = s.next()
|
||||
base, prefix = 2, 'b'
|
||||
default:
|
||||
base, prefix = 8, '0'
|
||||
digsep = 1 // leading 0
|
||||
}
|
||||
}
|
||||
ch, ds = s.digits(ch, base, &invalid)
|
||||
digsep |= ds
|
||||
if ch == '.' && s.Mode&ScanFloats != 0 {
|
||||
ch = s.next()
|
||||
seenDot = true
|
||||
}
|
||||
}
|
||||
|
||||
// fractional part
|
||||
if seenDot {
|
||||
tok = Float
|
||||
if prefix == 'o' || prefix == 'b' {
|
||||
s.error("invalid radix point in " + litname(prefix))
|
||||
}
|
||||
ch, ds = s.digits(ch, base, &invalid)
|
||||
digsep |= ds
|
||||
}
|
||||
|
||||
if digsep&1 == 0 {
|
||||
s.error(litname(prefix) + " has no digits")
|
||||
}
|
||||
|
||||
// exponent
|
||||
if e := lower(ch); (e == 'e' || e == 'p') && s.Mode&ScanFloats != 0 {
|
||||
switch {
|
||||
case e == 'e' && prefix != 0 && prefix != '0':
|
||||
s.errorf("%q exponent requires decimal mantissa", ch)
|
||||
case e == 'p' && prefix != 'x':
|
||||
s.errorf("%q exponent requires hexadecimal mantissa", ch)
|
||||
}
|
||||
ch = s.next()
|
||||
tok = Float
|
||||
if ch == '+' || ch == '-' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch, ds = s.digits(ch, 10, nil)
|
||||
digsep |= ds
|
||||
if ds&1 == 0 {
|
||||
s.error("exponent has no digits")
|
||||
}
|
||||
} else if prefix == 'x' && tok == Float {
|
||||
s.error("hexadecimal mantissa requires a 'p' exponent")
|
||||
}
|
||||
|
||||
if tok == Int && invalid != 0 {
|
||||
s.errorf("invalid digit %q in %s", invalid, litname(prefix))
|
||||
}
|
||||
|
||||
if digsep&2 != 0 {
|
||||
s.tokEnd = s.srcPos - s.lastCharLen // make sure token text is terminated
|
||||
if i := invalidSep(s.TokenText()); i >= 0 {
|
||||
s.error("'_' must separate successive digits")
|
||||
}
|
||||
}
|
||||
|
||||
return tok, ch
|
||||
}
|
||||
|
||||
func litname(prefix rune) string {
|
||||
switch prefix {
|
||||
default:
|
||||
return "decimal literal"
|
||||
case 'x':
|
||||
return "hexadecimal literal"
|
||||
case 'o', '0':
|
||||
return "octal literal"
|
||||
case 'b':
|
||||
return "binary literal"
|
||||
}
|
||||
}
|
||||
|
||||
// invalidSep returns the index of the first invalid separator in x, or -1.
|
||||
func invalidSep(x string) int {
|
||||
x1 := ' ' // prefix char, we only care if it's 'x'
|
||||
d := '.' // digit, one of '_', '0' (a digit), or '.' (anything else)
|
||||
i := 0
|
||||
|
||||
// a prefix counts as a digit
|
||||
if len(x) >= 2 && x[0] == '0' {
|
||||
x1 = lower(rune(x[1]))
|
||||
if x1 == 'x' || x1 == 'o' || x1 == 'b' {
|
||||
d = '0'
|
||||
i = 2
|
||||
}
|
||||
}
|
||||
|
||||
// mantissa and exponent
|
||||
for ; i < len(x); i++ {
|
||||
p := d // previous digit
|
||||
d = rune(x[i])
|
||||
switch {
|
||||
case d == '_':
|
||||
if p != '0' {
|
||||
return i
|
||||
}
|
||||
case isDecimal(d) || x1 == 'x' && isHex(d):
|
||||
d = '0'
|
||||
default:
|
||||
if p == '_' {
|
||||
return i - 1
|
||||
}
|
||||
d = '.'
|
||||
}
|
||||
}
|
||||
if d == '_' {
|
||||
return len(x) - 1
|
||||
}
|
||||
|
||||
return -1
|
||||
}
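
// Editor's note: a sketch (not part of the original file) of the user-visible
// effect of the separator check above: "1_000" and "0x_ff" scan cleanly, while
// "1__000" and "1000_" report "'_' must separate successive digits" (assumes
// the client imports "fmt", "strings", "text/scanner"):
//
//	for _, src := range []string{"1_000", "0x_ff", "1__000", "1000_"} {
//		var s scanner.Scanner
//		s.Init(strings.NewReader(src))
//		var firstErr string
//		s.Error = func(_ *scanner.Scanner, msg string) {
//			if firstErr == "" {
//				firstErr = msg
//			}
//		}
//		s.Scan()
//		fmt.Printf("%-8q -> %q\n", src, firstErr)
//	}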
|
||||
|
||||
func digitVal(ch rune) int {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9':
|
||||
return int(ch - '0')
|
||||
case 'a' <= lower(ch) && lower(ch) <= 'f':
|
||||
return int(lower(ch) - 'a' + 10)
|
||||
}
|
||||
return 16 // larger than any legal digit val
|
||||
}
|
||||
|
||||
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||
for n > 0 && digitVal(ch) < base {
|
||||
ch = s.next()
|
||||
n--
|
||||
}
|
||||
if n > 0 {
|
||||
s.error("invalid char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
func (s *Scanner) scanEscape(quote rune) rune {
|
||||
ch := s.next() // read character after '/'
|
||||
switch ch {
|
||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
|
||||
// nothing to do
|
||||
ch = s.next()
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
ch = s.scanDigits(ch, 8, 3)
|
||||
case 'x':
|
||||
ch = s.scanDigits(s.next(), 16, 2)
|
||||
case 'u':
|
||||
ch = s.scanDigits(s.next(), 16, 4)
|
||||
case 'U':
|
||||
ch = s.scanDigits(s.next(), 16, 8)
|
||||
default:
|
||||
s.error("invalid char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
func (s *Scanner) scanString(quote rune) (n int) {
|
||||
ch := s.next() // read character after quote
|
||||
for ch != quote {
|
||||
if ch == '\n' || ch < 0 {
|
||||
s.error("literal not terminated")
|
||||
return
|
||||
}
|
||||
if ch == '\\' {
|
||||
ch = s.scanEscape(quote)
|
||||
} else {
|
||||
ch = s.next()
|
||||
}
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Scanner) scanRawString() {
|
||||
ch := s.next() // read character after '`'
|
||||
for ch != '`' {
|
||||
if ch < 0 {
|
||||
s.error("literal not terminated")
|
||||
return
|
||||
}
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scanner) scanChar() {
|
||||
if s.scanString('\'') != 1 {
|
||||
s.error("invalid char literal")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scanner) scanComment(ch rune) rune {
|
||||
// ch == '/' || ch == '*'
|
||||
if ch == '/' {
|
||||
// line comment
|
||||
ch = s.next() // read character after "//"
|
||||
for ch != '\n' && ch >= 0 {
|
||||
ch = s.next()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// general comment
|
||||
ch = s.next() // read character after "/*"
|
||||
for {
|
||||
if ch < 0 {
|
||||
s.error("comment not terminated")
|
||||
break
|
||||
}
|
||||
ch0 := ch
|
||||
ch = s.next()
|
||||
if ch0 == '*' && ch == '/' {
|
||||
ch = s.next()
|
||||
break
|
||||
}
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// Scan reads the next token or Unicode character from source and returns it.
|
||||
// It only recognizes tokens t for which the respective [Scanner.Mode] bit (1<<-t) is set.
|
||||
// It returns [EOF] at the end of the source. It reports scanner errors (read and
|
||||
// token errors) by calling s.Error, if not nil; otherwise it prints an error
|
||||
// message to [os.Stderr].
|
||||
func (s *Scanner) Scan() rune {
|
||||
ch := s.Peek()
|
||||
|
||||
// reset token text position
|
||||
s.tokPos = -1
|
||||
s.Line = 0
|
||||
|
||||
redo:
|
||||
// skip white space
|
||||
for s.Whitespace&(1<<uint(ch)) != 0 {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
// start collecting token text
|
||||
s.tokBuf.Reset()
|
||||
s.tokPos = s.srcPos - s.lastCharLen
|
||||
|
||||
// set token position
|
||||
// (this is a slightly optimized version of the code in Pos())
|
||||
s.Offset = s.srcBufOffset + s.tokPos
|
||||
if s.column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.Line = s.line
|
||||
s.Column = s.column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.Line = s.line - 1
|
||||
s.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
// determine token value
|
||||
tok := ch
|
||||
switch {
|
||||
case s.isIdentRune(ch, 0):
|
||||
if s.Mode&ScanIdents != 0 {
|
||||
tok = Ident
|
||||
ch = s.scanIdentifier()
|
||||
} else {
|
||||
ch = s.next()
|
||||
}
|
||||
case isDecimal(ch):
|
||||
if s.Mode&(ScanInts|ScanFloats) != 0 {
|
||||
tok, ch = s.scanNumber(ch, false)
|
||||
} else {
|
||||
ch = s.next()
|
||||
}
|
||||
default:
|
||||
switch ch {
|
||||
case EOF:
|
||||
break
|
||||
case '"':
|
||||
if s.Mode&ScanStrings != 0 {
|
||||
s.scanString('"')
|
||||
tok = String
|
||||
}
|
||||
ch = s.next()
|
||||
case '\'':
|
||||
if s.Mode&ScanChars != 0 {
|
||||
s.scanChar()
|
||||
tok = Char
|
||||
}
|
||||
ch = s.next()
|
||||
case '.':
|
||||
ch = s.next()
|
||||
if isDecimal(ch) && s.Mode&ScanFloats != 0 {
|
||||
tok, ch = s.scanNumber(ch, true)
|
||||
}
|
||||
case '/':
|
||||
ch = s.next()
|
||||
if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
|
||||
if s.Mode&SkipComments != 0 {
|
||||
s.tokPos = -1 // don't collect token text
|
||||
ch = s.scanComment(ch)
|
||||
goto redo
|
||||
}
|
||||
ch = s.scanComment(ch)
|
||||
tok = Comment
|
||||
}
|
||||
case '`':
|
||||
if s.Mode&ScanRawStrings != 0 {
|
||||
s.scanRawString()
|
||||
tok = RawString
|
||||
}
|
||||
ch = s.next()
|
||||
default:
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
|
||||
// end of token text
|
||||
s.tokEnd = s.srcPos - s.lastCharLen
|
||||
|
||||
s.ch = ch
|
||||
return tok
|
||||
}

// Pos returns the position of the character immediately after
// the character or token returned by the last call to [Scanner.Next] or [Scanner.Scan].
// Use the [Scanner.Position] field for the start position of the most
// recently scanned token.
func (s *Scanner) Pos() (pos Position) {
	pos.Filename = s.Filename
	pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
	switch {
	case s.column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.line
		pos.Column = s.column
	case s.lastLineLen > 0:
		// last character was a '\n'
		pos.Line = s.line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
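
// Editor's note: the difference between the Position field (start of the most
// recently scanned token) and Pos (just past it), as a sketch that is not part
// of the original file (assumes the client imports "fmt", "strings",
// "text/scanner"):
//
//	var s scanner.Scanner
//	s.Init(strings.NewReader("hello world"))
//	s.Scan()                // Ident "hello"
//	fmt.Println(s.Position) // <input>:1:1
//	fmt.Println(s.Pos())    // <input>:1:6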

// TokenText returns the string corresponding to the most recently scanned token.
// Valid after calling [Scanner.Scan] and in calls of [Scanner.Error].
func (s *Scanner) TokenText() string {
	if s.tokPos < 0 {
		// no token text
		return ""
	}

	if s.tokEnd < s.tokPos {
		// if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
		s.tokEnd = s.tokPos
	}
	// s.tokEnd >= s.tokPos

	if s.tokBuf.Len() == 0 {
		// common case: the entire token text is still in srcBuf
		return string(s.srcBuf[s.tokPos:s.tokEnd])
	}

	// part of the token text was saved in tokBuf: save the rest in
	// tokBuf as well and return its content
	s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
	s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
	return s.tokBuf.String()
}
934
src/text/scanner/scanner_test.go
Normal file
@@ -0,0 +1,934 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A StringReader delivers its data one string segment at a time via Read.
|
||||
type StringReader struct {
|
||||
data []string
|
||||
step int
|
||||
}
|
||||
|
||||
func (r *StringReader) Read(p []byte) (n int, err error) {
|
||||
if r.step < len(r.data) {
|
||||
s := r.data[r.step]
|
||||
n = copy(p, s)
|
||||
r.step++
|
||||
} else {
|
||||
err = io.EOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func readRuneSegments(t *testing.T, segments []string) {
|
||||
got := ""
|
||||
want := strings.Join(segments, "")
|
||||
s := new(Scanner).Init(&StringReader{data: segments})
|
||||
for {
|
||||
ch := s.Next()
|
||||
if ch == EOF {
|
||||
break
|
||||
}
|
||||
got += string(ch)
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("segments=%v got=%s want=%s", segments, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
var segmentList = [][]string{
|
||||
{},
|
||||
{""},
|
||||
{"日", "本語"},
|
||||
{"\u65e5", "\u672c", "\u8a9e"},
|
||||
{"\U000065e5", " ", "\U0000672c", "\U00008a9e"},
|
||||
{"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
|
||||
{"Hello", ", ", "World", "!"},
|
||||
{"Hello", ", ", "", "World", "!"},
|
||||
}
|
||||
|
||||
func TestNext(t *testing.T) {
|
||||
for _, s := range segmentList {
|
||||
readRuneSegments(t, s)
|
||||
}
|
||||
}
|
||||
|
||||
type token struct {
|
||||
tok rune
|
||||
text string
|
||||
}
|
||||
|
||||
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
|
||||
|
||||
var tokenList = []token{
|
||||
{Comment, "// line comments"},
|
||||
{Comment, "//"},
|
||||
{Comment, "////"},
|
||||
{Comment, "// comment"},
|
||||
{Comment, "// /* comment */"},
|
||||
{Comment, "// // comment //"},
|
||||
{Comment, "//" + f100},
|
||||
|
||||
{Comment, "// general comments"},
|
||||
{Comment, "/**/"},
|
||||
{Comment, "/***/"},
|
||||
{Comment, "/* comment */"},
|
||||
{Comment, "/* // comment */"},
|
||||
{Comment, "/* /* comment */"},
|
||||
{Comment, "/*\n comment\n*/"},
|
||||
{Comment, "/*" + f100 + "*/"},
|
||||
|
||||
{Comment, "// identifiers"},
|
||||
{Ident, "a"},
|
||||
{Ident, "a0"},
|
||||
{Ident, "foobar"},
|
||||
{Ident, "abc123"},
|
||||
{Ident, "LGTM"},
|
||||
{Ident, "_"},
|
||||
{Ident, "_abc123"},
|
||||
{Ident, "abc123_"},
|
||||
{Ident, "_abc_123_"},
|
||||
{Ident, "_äöü"},
|
||||
{Ident, "_本"},
|
||||
{Ident, "äöü"},
|
||||
{Ident, "本"},
|
||||
{Ident, "a۰۱۸"},
|
||||
{Ident, "foo६४"},
|
||||
{Ident, "bar9876"},
|
||||
{Ident, f100},
|
||||
|
||||
{Comment, "// decimal ints"},
|
||||
{Int, "0"},
|
||||
{Int, "1"},
|
||||
{Int, "9"},
|
||||
{Int, "42"},
|
||||
{Int, "1234567890"},
|
||||
|
||||
{Comment, "// octal ints"},
|
||||
{Int, "00"},
|
||||
{Int, "01"},
|
||||
{Int, "07"},
|
||||
{Int, "042"},
|
||||
{Int, "01234567"},
|
||||
|
||||
{Comment, "// hexadecimal ints"},
|
||||
{Int, "0x0"},
|
||||
{Int, "0x1"},
|
||||
{Int, "0xf"},
|
||||
{Int, "0x42"},
|
||||
{Int, "0x123456789abcDEF"},
|
||||
{Int, "0x" + f100},
|
||||
{Int, "0X0"},
|
||||
{Int, "0X1"},
|
||||
{Int, "0XF"},
|
||||
{Int, "0X42"},
|
||||
{Int, "0X123456789abcDEF"},
|
||||
{Int, "0X" + f100},
|
||||
|
||||
{Comment, "// floats"},
|
||||
{Float, "0."},
|
||||
{Float, "1."},
|
||||
{Float, "42."},
|
||||
{Float, "01234567890."},
|
||||
{Float, ".0"},
|
||||
{Float, ".1"},
|
||||
{Float, ".42"},
|
||||
{Float, ".0123456789"},
|
||||
{Float, "0.0"},
|
||||
{Float, "1.0"},
|
||||
{Float, "42.0"},
|
||||
{Float, "01234567890.0"},
|
||||
{Float, "0e0"},
|
||||
{Float, "1e0"},
|
||||
{Float, "42e0"},
|
||||
{Float, "01234567890e0"},
|
||||
{Float, "0E0"},
|
||||
{Float, "1E0"},
|
||||
{Float, "42E0"},
|
||||
{Float, "01234567890E0"},
|
||||
{Float, "0e+10"},
|
||||
{Float, "1e-10"},
|
||||
{Float, "42e+10"},
|
||||
{Float, "01234567890e-10"},
|
||||
{Float, "0E+10"},
|
||||
{Float, "1E-10"},
|
||||
{Float, "42E+10"},
|
||||
{Float, "01234567890E-10"},
|
||||
|
||||
{Comment, "// chars"},
|
||||
{Char, `' '`},
|
||||
{Char, `'a'`},
|
||||
{Char, `'本'`},
|
||||
{Char, `'\a'`},
|
||||
{Char, `'\b'`},
|
||||
{Char, `'\f'`},
|
||||
{Char, `'\n'`},
|
||||
{Char, `'\r'`},
|
||||
{Char, `'\t'`},
|
||||
{Char, `'\v'`},
|
||||
{Char, `'\''`},
|
||||
{Char, `'\000'`},
|
||||
{Char, `'\777'`},
|
||||
{Char, `'\x00'`},
|
||||
{Char, `'\xff'`},
|
||||
{Char, `'\u0000'`},
|
||||
{Char, `'\ufA16'`},
|
||||
{Char, `'\U00000000'`},
|
||||
{Char, `'\U0000ffAB'`},
|
||||
|
||||
{Comment, "// strings"},
|
||||
{String, `" "`},
|
||||
{String, `"a"`},
|
||||
{String, `"本"`},
|
||||
{String, `"\a"`},
|
||||
{String, `"\b"`},
|
||||
{String, `"\f"`},
|
||||
{String, `"\n"`},
|
||||
{String, `"\r"`},
|
||||
{String, `"\t"`},
|
||||
{String, `"\v"`},
|
||||
{String, `"\""`},
|
||||
{String, `"\000"`},
|
||||
{String, `"\777"`},
|
||||
{String, `"\x00"`},
|
||||
{String, `"\xff"`},
|
||||
{String, `"\u0000"`},
|
||||
{String, `"\ufA16"`},
|
||||
{String, `"\U00000000"`},
|
||||
{String, `"\U0000ffAB"`},
|
||||
{String, `"` + f100 + `"`},
|
||||
|
||||
{Comment, "// raw strings"},
|
||||
{RawString, "``"},
|
||||
{RawString, "`\\`"},
|
||||
{RawString, "`" + "\n\n/* foobar */\n\n" + "`"},
|
||||
{RawString, "`" + f100 + "`"},
|
||||
|
||||
{Comment, "// individual characters"},
|
||||
// NUL character is not allowed
|
||||
{'\x01', "\x01"},
|
||||
{' ' - 1, string(' ' - 1)},
|
||||
{'+', "+"},
|
||||
{'/', "/"},
|
||||
{'.', "."},
|
||||
{'~', "~"},
|
||||
{'(', "("},
|
||||
}
|
||||
|
||||
func makeSource(pattern string) *bytes.Buffer {
|
||||
var buf bytes.Buffer
|
||||
for _, k := range tokenList {
|
||||
fmt.Fprintf(&buf, pattern, k.text)
|
||||
}
|
||||
return &buf
|
||||
}
|
||||
|
||||
func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) {
|
||||
if got != want {
|
||||
t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
|
||||
}
|
||||
if s.Line != line {
|
||||
t.Errorf("line = %d, want %d for %q", s.Line, line, text)
|
||||
}
|
||||
stext := s.TokenText()
|
||||
if stext != text {
|
||||
t.Errorf("text = %q, want %q", stext, text)
|
||||
} else {
|
||||
// check idempotency of TokenText() call
|
||||
stext = s.TokenText()
|
||||
if stext != text {
|
||||
t.Errorf("text = %q, want %q (idempotency check)", stext, text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkTokErr(t *testing.T, s *Scanner, line int, want rune, text string) {
|
||||
prevCount := s.ErrorCount
|
||||
checkTok(t, s, line, s.Scan(), want, text)
|
||||
if s.ErrorCount != prevCount+1 {
|
||||
t.Fatalf("want error for %q", text)
|
||||
}
|
||||
}
|
||||
|
||||
func countNewlines(s string) int {
|
||||
n := 0
|
||||
for _, ch := range s {
|
||||
if ch == '\n' {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func testScan(t *testing.T, mode uint) {
|
||||
s := new(Scanner).Init(makeSource(" \t%s\n"))
|
||||
s.Mode = mode
|
||||
tok := s.Scan()
|
||||
line := 1
|
||||
for _, k := range tokenList {
|
||||
if mode&SkipComments == 0 || k.tok != Comment {
|
||||
checkTok(t, s, line, tok, k.tok, k.text)
|
||||
tok = s.Scan()
|
||||
}
|
||||
line += countNewlines(k.text) + 1 // each token is on a new line
|
||||
}
|
||||
checkTok(t, s, line, tok, EOF, "")
|
||||
}
|
||||
|
||||
func TestScan(t *testing.T) {
|
||||
testScan(t, GoTokens)
|
||||
testScan(t, GoTokens&^SkipComments)
|
||||
}
|
||||
|
||||
func TestInvalidExponent(t *testing.T) {
|
||||
const src = "1.5e 1.5E 1e+ 1e- 1.5z"
|
||||
s := new(Scanner).Init(strings.NewReader(src))
|
||||
s.Error = func(s *Scanner, msg string) {
|
||||
const want = "exponent has no digits"
|
||||
if msg != want {
|
||||
t.Errorf("%s: got error %q; want %q", s.TokenText(), msg, want)
|
||||
}
|
||||
}
|
||||
checkTokErr(t, s, 1, Float, "1.5e")
|
||||
checkTokErr(t, s, 1, Float, "1.5E")
|
||||
checkTokErr(t, s, 1, Float, "1e+")
|
||||
checkTokErr(t, s, 1, Float, "1e-")
|
||||
checkTok(t, s, 1, s.Scan(), Float, "1.5")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "z")
|
||||
checkTok(t, s, 1, s.Scan(), EOF, "")
|
||||
if s.ErrorCount != 4 {
|
||||
t.Errorf("%d errors, want 4", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPosition(t *testing.T) {
|
||||
src := makeSource("\t\t\t\t%s\n")
|
||||
s := new(Scanner).Init(src)
|
||||
s.Mode = GoTokens &^ SkipComments
|
||||
s.Scan()
|
||||
pos := Position{"", 4, 1, 5}
|
||||
for _, k := range tokenList {
|
||||
if s.Offset != pos.Offset {
|
||||
t.Errorf("offset = %d, want %d for %q", s.Offset, pos.Offset, k.text)
|
||||
}
|
||||
if s.Line != pos.Line {
|
||||
t.Errorf("line = %d, want %d for %q", s.Line, pos.Line, k.text)
|
||||
}
|
||||
if s.Column != pos.Column {
|
||||
t.Errorf("column = %d, want %d for %q", s.Column, pos.Column, k.text)
|
||||
}
|
||||
pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
|
||||
pos.Line += countNewlines(k.text) + 1 // each token is on a new line
|
||||
s.Scan()
|
||||
}
|
||||
// make sure there were no token-internal errors reported by scanner
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanZeroMode(t *testing.T) {
|
||||
src := makeSource("%s\n")
|
||||
str := src.String()
|
||||
s := new(Scanner).Init(src)
|
||||
s.Mode = 0 // don't recognize any token classes
|
||||
s.Whitespace = 0 // don't skip any whitespace
|
||||
tok := s.Scan()
|
||||
for i, ch := range str {
|
||||
if tok != ch {
|
||||
t.Fatalf("%d. tok = %s, want %s", i, TokenString(tok), TokenString(ch))
|
||||
}
|
||||
tok = s.Scan()
|
||||
}
|
||||
if tok != EOF {
|
||||
t.Fatalf("tok = %s, want EOF", TokenString(tok))
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func testScanSelectedMode(t *testing.T, mode uint, class rune) {
|
||||
src := makeSource("%s\n")
|
||||
s := new(Scanner).Init(src)
|
||||
s.Mode = mode
|
||||
tok := s.Scan()
|
||||
for tok != EOF {
|
||||
if tok < 0 && tok != class {
|
||||
t.Fatalf("tok = %s, want %s", TokenString(tok), TokenString(class))
|
||||
}
|
||||
tok = s.Scan()
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanSelectedMask(t *testing.T) {
|
||||
testScanSelectedMode(t, 0, 0)
|
||||
testScanSelectedMode(t, ScanIdents, Ident)
|
||||
// Don't test ScanInts and ScanNumbers since some parts of
|
||||
// the floats in the source look like (invalid) octal ints
|
||||
// and ScanNumbers may return either Int or Float.
|
||||
testScanSelectedMode(t, ScanChars, Char)
|
||||
testScanSelectedMode(t, ScanStrings, String)
|
||||
testScanSelectedMode(t, SkipComments, 0)
|
||||
testScanSelectedMode(t, ScanComments, Comment)
|
||||
}
|
||||
|
||||
func TestScanCustomIdent(t *testing.T) {
|
||||
const src = "faab12345 a12b123 a12 3b"
|
||||
s := new(Scanner).Init(strings.NewReader(src))
|
||||
// ident = ( 'a' | 'b' ) { digit } .
|
||||
// digit = '0' .. '3' .
|
||||
// with a maximum length of 4
|
||||
s.IsIdentRune = func(ch rune, i int) bool {
|
||||
return i == 0 && (ch == 'a' || ch == 'b') || 0 < i && i < 4 && '0' <= ch && ch <= '3'
|
||||
}
|
||||
checkTok(t, s, 1, s.Scan(), 'f', "f")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "a")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "a")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "b123")
|
||||
checkTok(t, s, 1, s.Scan(), Int, "45")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "a12")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "b123")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "a12")
|
||||
checkTok(t, s, 1, s.Scan(), Int, "3")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "b")
|
||||
checkTok(t, s, 1, s.Scan(), EOF, "")
|
||||
}
|
||||
|
||||
func TestScanNext(t *testing.T) {
|
||||
const BOM = '\uFEFF'
|
||||
BOMs := string(BOM)
|
||||
s := new(Scanner).Init(strings.NewReader(BOMs + "if a == bcd /* com" + BOMs + "ment */ {\n\ta += c\n}" + BOMs + "// line comment ending in eof"))
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "if") // the first BOM is ignored
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "a")
|
||||
checkTok(t, s, 1, s.Scan(), '=', "=")
|
||||
checkTok(t, s, 0, s.Next(), '=', "")
|
||||
checkTok(t, s, 0, s.Next(), ' ', "")
|
||||
checkTok(t, s, 0, s.Next(), 'b', "")
|
||||
checkTok(t, s, 1, s.Scan(), Ident, "cd")
|
||||
checkTok(t, s, 1, s.Scan(), '{', "{")
|
||||
checkTok(t, s, 2, s.Scan(), Ident, "a")
|
||||
checkTok(t, s, 2, s.Scan(), '+', "+")
|
||||
checkTok(t, s, 0, s.Next(), '=', "")
|
||||
checkTok(t, s, 2, s.Scan(), Ident, "c")
|
||||
checkTok(t, s, 3, s.Scan(), '}', "}")
|
||||
checkTok(t, s, 3, s.Scan(), BOM, BOMs)
|
||||
checkTok(t, s, 3, s.Scan(), -1, "")
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanWhitespace(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var ws uint64
|
||||
// start at 1, NUL character is not allowed
|
||||
for ch := byte(1); ch < ' '; ch++ {
|
||||
buf.WriteByte(ch)
|
||||
ws |= 1 << ch
|
||||
}
|
||||
const orig = 'x'
|
||||
buf.WriteByte(orig)
|
||||
|
||||
s := new(Scanner).Init(&buf)
|
||||
s.Mode = 0
|
||||
s.Whitespace = ws
|
||||
tok := s.Scan()
|
||||
if tok != orig {
|
||||
t.Errorf("tok = %s, want %s", TokenString(tok), TokenString(orig))
|
||||
}
|
||||
}
|
||||
|
||||
func testError(t *testing.T, src, pos, msg string, tok rune) {
|
||||
s := new(Scanner).Init(strings.NewReader(src))
|
||||
errorCalled := false
|
||||
s.Error = func(s *Scanner, m string) {
|
||||
if !errorCalled {
|
||||
// only look at first error
|
||||
if p := s.Pos().String(); p != pos {
|
||||
t.Errorf("pos = %q, want %q for %q", p, pos, src)
|
||||
}
|
||||
if m != msg {
|
||||
t.Errorf("msg = %q, want %q for %q", m, msg, src)
|
||||
}
|
||||
errorCalled = true
|
||||
}
|
||||
}
|
||||
tk := s.Scan()
|
||||
if tk != tok {
|
||||
t.Errorf("tok = %s, want %s for %q", TokenString(tk), TokenString(tok), src)
|
||||
}
|
||||
if !errorCalled {
|
||||
t.Errorf("error handler not called for %q", src)
|
||||
}
|
||||
if s.ErrorCount == 0 {
|
||||
t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
|
||||
}
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
testError(t, "\x00", "<input>:1:1", "invalid character NUL", 0)
|
||||
testError(t, "\x80", "<input>:1:1", "invalid UTF-8 encoding", utf8.RuneError)
|
||||
testError(t, "\xff", "<input>:1:1", "invalid UTF-8 encoding", utf8.RuneError)
|
||||
|
||||
testError(t, "a\x00", "<input>:1:2", "invalid character NUL", Ident)
|
||||
testError(t, "ab\x80", "<input>:1:3", "invalid UTF-8 encoding", Ident)
|
||||
testError(t, "abc\xff", "<input>:1:4", "invalid UTF-8 encoding", Ident)
|
||||
|
||||
testError(t, `"a`+"\x00", "<input>:1:3", "invalid character NUL", String)
|
||||
testError(t, `"ab`+"\x80", "<input>:1:4", "invalid UTF-8 encoding", String)
|
||||
testError(t, `"abc`+"\xff", "<input>:1:5", "invalid UTF-8 encoding", String)
|
||||
|
||||
testError(t, "`a"+"\x00", "<input>:1:3", "invalid character NUL", RawString)
|
||||
testError(t, "`ab"+"\x80", "<input>:1:4", "invalid UTF-8 encoding", RawString)
|
||||
testError(t, "`abc"+"\xff", "<input>:1:5", "invalid UTF-8 encoding", RawString)
|
||||
|
||||
testError(t, `'\"'`, "<input>:1:3", "invalid char escape", Char)
|
||||
testError(t, `"\'"`, "<input>:1:3", "invalid char escape", String)
|
||||
|
||||
testError(t, `01238`, "<input>:1:6", "invalid digit '8' in octal literal", Int)
|
||||
testError(t, `01238123`, "<input>:1:9", "invalid digit '8' in octal literal", Int)
|
||||
testError(t, `0x`, "<input>:1:3", "hexadecimal literal has no digits", Int)
|
||||
testError(t, `0xg`, "<input>:1:3", "hexadecimal literal has no digits", Int)
|
||||
testError(t, `'aa'`, "<input>:1:4", "invalid char literal", Char)
|
||||
testError(t, `1.5e`, "<input>:1:5", "exponent has no digits", Float)
|
||||
testError(t, `1.5E`, "<input>:1:5", "exponent has no digits", Float)
|
||||
testError(t, `1.5e+`, "<input>:1:6", "exponent has no digits", Float)
|
||||
testError(t, `1.5e-`, "<input>:1:6", "exponent has no digits", Float)
|
||||
|
||||
testError(t, `'`, "<input>:1:2", "literal not terminated", Char)
|
||||
testError(t, `'`+"\n", "<input>:1:2", "literal not terminated", Char)
|
||||
testError(t, `"abc`, "<input>:1:5", "literal not terminated", String)
|
||||
testError(t, `"abc`+"\n", "<input>:1:5", "literal not terminated", String)
|
||||
testError(t, "`abc\n", "<input>:2:1", "literal not terminated", RawString)
|
||||
testError(t, `/*/`, "<input>:1:4", "comment not terminated", EOF)
|
||||
}
|
||||
|
||||
// An errReader returns (0, err) where err is not io.EOF.
|
||||
type errReader struct{}
|
||||
|
||||
func (errReader) Read(b []byte) (int, error) {
|
||||
return 0, io.ErrNoProgress // some error that is not io.EOF
|
||||
}
|
||||
|
||||
func TestIOError(t *testing.T) {
|
||||
s := new(Scanner).Init(errReader{})
|
||||
errorCalled := false
|
||||
s.Error = func(s *Scanner, msg string) {
|
||||
if !errorCalled {
|
||||
if want := io.ErrNoProgress.Error(); msg != want {
|
||||
t.Errorf("msg = %q, want %q", msg, want)
|
||||
}
|
||||
errorCalled = true
|
||||
}
|
||||
}
|
||||
tok := s.Scan()
|
||||
if tok != EOF {
|
||||
t.Errorf("tok = %s, want EOF", TokenString(tok))
|
||||
}
|
||||
if !errorCalled {
|
||||
t.Errorf("error handler not called")
|
||||
}
|
||||
}
|
||||
|
||||
func checkPos(t *testing.T, got, want Position) {
|
||||
if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
|
||||
t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
|
||||
got.Offset, got.Line, got.Column, want.Offset, want.Line, want.Column)
|
||||
}
|
||||
}
|
||||
|
||||
func checkNextPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
|
||||
if ch := s.Next(); ch != char {
|
||||
t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
|
||||
}
|
||||
want := Position{Offset: offset, Line: line, Column: column}
|
||||
checkPos(t, s.Pos(), want)
|
||||
}
|
||||
|
||||
func checkScanPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
|
||||
want := Position{Offset: offset, Line: line, Column: column}
|
||||
checkPos(t, s.Pos(), want)
|
||||
if ch := s.Scan(); ch != char {
|
||||
t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
|
||||
if string(ch) != s.TokenText() {
|
||||
t.Errorf("tok = %q, want %q", s.TokenText(), string(ch))
|
||||
}
|
||||
}
|
||||
checkPos(t, s.Position, want)
|
||||
}
|
||||
|
||||
func TestPos(t *testing.T) {
|
||||
// corner case: empty source
|
||||
s := new(Scanner).Init(strings.NewReader(""))
|
||||
checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
|
||||
s.Peek() // peek doesn't affect the position
|
||||
checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
|
||||
|
||||
// corner case: source with only a newline
|
||||
s = new(Scanner).Init(strings.NewReader("\n"))
|
||||
checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
|
||||
checkNextPos(t, s, 1, 2, 1, '\n')
|
||||
// after EOF position doesn't change
|
||||
for i := 10; i > 0; i-- {
|
||||
checkScanPos(t, s, 1, 2, 1, EOF)
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
|
||||
// corner case: source with only a single character
|
||||
s = new(Scanner).Init(strings.NewReader("本"))
|
||||
checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
|
||||
checkNextPos(t, s, 3, 1, 2, '本')
|
||||
// after EOF position doesn't change
|
||||
for i := 10; i > 0; i-- {
|
||||
checkScanPos(t, s, 3, 1, 2, EOF)
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
|
||||
// positions after calling Next
|
||||
s = new(Scanner).Init(strings.NewReader(" foo६४ \n\n本語\n"))
|
||||
checkNextPos(t, s, 1, 1, 2, ' ')
|
||||
s.Peek() // peek doesn't affect the position
|
||||
checkNextPos(t, s, 2, 1, 3, ' ')
|
||||
checkNextPos(t, s, 3, 1, 4, 'f')
|
||||
checkNextPos(t, s, 4, 1, 5, 'o')
|
||||
checkNextPos(t, s, 5, 1, 6, 'o')
|
||||
checkNextPos(t, s, 8, 1, 7, '६')
|
||||
checkNextPos(t, s, 11, 1, 8, '४')
|
||||
checkNextPos(t, s, 12, 1, 9, ' ')
|
||||
checkNextPos(t, s, 13, 1, 10, ' ')
|
||||
checkNextPos(t, s, 14, 2, 1, '\n')
|
||||
checkNextPos(t, s, 15, 3, 1, '\n')
|
||||
checkNextPos(t, s, 18, 3, 2, '本')
|
||||
checkNextPos(t, s, 21, 3, 3, '語')
|
||||
checkNextPos(t, s, 22, 4, 1, '\n')
|
||||
// after EOF position doesn't change
|
||||
for i := 10; i > 0; i-- {
|
||||
checkScanPos(t, s, 22, 4, 1, EOF)
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
|
||||
// positions after calling Scan
|
||||
s = new(Scanner).Init(strings.NewReader("abc\n本語\n\nx"))
|
||||
s.Mode = 0
|
||||
s.Whitespace = 0
|
||||
checkScanPos(t, s, 0, 1, 1, 'a')
|
||||
s.Peek() // peek doesn't affect the position
|
||||
checkScanPos(t, s, 1, 1, 2, 'b')
|
||||
checkScanPos(t, s, 2, 1, 3, 'c')
|
||||
checkScanPos(t, s, 3, 1, 4, '\n')
|
||||
checkScanPos(t, s, 4, 2, 1, '本')
|
||||
checkScanPos(t, s, 7, 2, 2, '語')
|
||||
checkScanPos(t, s, 10, 2, 3, '\n')
|
||||
checkScanPos(t, s, 11, 3, 1, '\n')
|
||||
checkScanPos(t, s, 12, 4, 1, 'x')
|
||||
// after EOF position doesn't change
|
||||
for i := 10; i > 0; i-- {
|
||||
checkScanPos(t, s, 13, 4, 2, EOF)
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("%d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
type countReader int
|
||||
|
||||
func (r *countReader) Read([]byte) (int, error) {
|
||||
*r++
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func TestNextEOFHandling(t *testing.T) {
|
||||
var r countReader
|
||||
|
||||
// corner case: empty source
|
||||
s := new(Scanner).Init(&r)
|
||||
|
||||
tok := s.Next()
|
||||
if tok != EOF {
|
||||
t.Error("1) EOF not reported")
|
||||
}
|
||||
|
||||
tok = s.Peek()
|
||||
if tok != EOF {
|
||||
t.Error("2) EOF not reported")
|
||||
}
|
||||
|
||||
if r != 1 {
|
||||
t.Errorf("scanner called Read %d times, not once", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanEOFHandling(t *testing.T) {
|
||||
var r countReader
|
||||
|
||||
// corner case: empty source
|
||||
s := new(Scanner).Init(&r)
|
||||
|
||||
tok := s.Scan()
|
||||
if tok != EOF {
|
||||
t.Error("1) EOF not reported")
|
||||
}
|
||||
|
||||
tok = s.Peek()
|
||||
if tok != EOF {
|
||||
t.Error("2) EOF not reported")
|
||||
}
|
||||
|
||||
if r != 1 {
|
||||
t.Errorf("scanner called Read %d times, not once", r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue29723(t *testing.T) {
|
||||
s := new(Scanner).Init(strings.NewReader(`x "`))
|
||||
s.Error = func(s *Scanner, _ string) {
|
||||
got := s.TokenText() // this call shouldn't panic
|
||||
const want = `"`
|
||||
if got != want {
|
||||
t.Errorf("got %q; want %q", got, want)
|
||||
}
|
||||
}
|
||||
for r := s.Scan(); r != EOF; r = s.Scan() {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNumbers(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
tok rune
|
||||
src, tokens, err string
|
||||
}{
|
||||
// binaries
|
||||
{Int, "0b0", "0b0", ""},
|
||||
{Int, "0b1010", "0b1010", ""},
|
||||
{Int, "0B1110", "0B1110", ""},
|
||||
|
||||
{Int, "0b", "0b", "binary literal has no digits"},
|
||||
{Int, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
|
||||
{Int, "0b01a0", "0b01 a0", ""}, // only accept 0-9
|
||||
|
||||
// binary floats (invalid)
|
||||
{Float, "0b.", "0b.", "invalid radix point in binary literal"},
|
||||
{Float, "0b.1", "0b.1", "invalid radix point in binary literal"},
|
||||
{Float, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
|
||||
{Float, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
|
||||
{Float, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
|
||||
|
||||
// octals
|
||||
{Int, "0o0", "0o0", ""},
|
||||
{Int, "0o1234", "0o1234", ""},
|
||||
{Int, "0O1234", "0O1234", ""},
|
||||
|
||||
{Int, "0o", "0o", "octal literal has no digits"},
|
||||
{Int, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
|
||||
{Int, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
|
||||
{Int, "0o12a3", "0o12 a3", ""}, // only accept 0-9
|
||||
|
||||
// octal floats (invalid)
|
||||
{Float, "0o.", "0o.", "invalid radix point in octal literal"},
|
||||
{Float, "0o.2", "0o.2", "invalid radix point in octal literal"},
|
||||
{Float, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
|
||||
{Float, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
|
||||
{Float, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
|
||||
|
||||
// 0-octals
|
||||
{Int, "0", "0", ""},
|
||||
{Int, "0123", "0123", ""},
|
||||
|
||||
{Int, "08123", "08123", "invalid digit '8' in octal literal"},
|
||||
{Int, "01293", "01293", "invalid digit '9' in octal literal"},
|
||||
{Int, "0F.", "0 F .", ""}, // only accept 0-9
|
||||
{Int, "0123F.", "0123 F .", ""},
|
||||
{Int, "0123456x", "0123456 x", ""},
|
||||
|
||||
// decimals
|
||||
{Int, "1", "1", ""},
|
||||
{Int, "1234", "1234", ""},
|
||||
|
||||
{Int, "1f", "1 f", ""}, // only accept 0-9
|
||||
|
||||
// decimal floats
|
||||
{Float, "0.", "0.", ""},
|
||||
{Float, "123.", "123.", ""},
|
||||
{Float, "0123.", "0123.", ""},
|
||||
|
||||
{Float, ".0", ".0", ""},
|
||||
{Float, ".123", ".123", ""},
|
||||
{Float, ".0123", ".0123", ""},
|
||||
|
||||
{Float, "0.0", "0.0", ""},
|
||||
{Float, "123.123", "123.123", ""},
|
||||
{Float, "0123.0123", "0123.0123", ""},
|
||||
|
||||
{Float, "0e0", "0e0", ""},
|
||||
{Float, "123e+0", "123e+0", ""},
|
||||
{Float, "0123E-1", "0123E-1", ""},
|
||||
|
||||
{Float, "0.e+1", "0.e+1", ""},
|
||||
{Float, "123.E-10", "123.E-10", ""},
|
||||
{Float, "0123.e123", "0123.e123", ""},
|
||||
|
||||
{Float, ".0e-1", ".0e-1", ""},
|
||||
{Float, ".123E+10", ".123E+10", ""},
|
||||
{Float, ".0123E123", ".0123E123", ""},
|
||||
|
||||
{Float, "0.0e1", "0.0e1", ""},
|
||||
{Float, "123.123E-10", "123.123E-10", ""},
|
||||
{Float, "0123.0123e+456", "0123.0123e+456", ""},
|
||||
|
||||
{Float, "0e", "0e", "exponent has no digits"},
|
||||
{Float, "0E+", "0E+", "exponent has no digits"},
|
||||
{Float, "1e+f", "1e+ f", "exponent has no digits"},
|
||||
{Float, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
|
||||
{Float, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
|
||||
|
||||
// hexadecimals
|
||||
{Int, "0x0", "0x0", ""},
|
||||
{Int, "0x1234", "0x1234", ""},
|
||||
{Int, "0xcafef00d", "0xcafef00d", ""},
|
||||
{Int, "0XCAFEF00D", "0XCAFEF00D", ""},
|
||||
|
||||
{Int, "0x", "0x", "hexadecimal literal has no digits"},
|
||||
{Int, "0x1g", "0x1 g", ""},
|
||||
|
||||
// hexadecimal floats
|
||||
{Float, "0x0p0", "0x0p0", ""},
|
||||
{Float, "0x12efp-123", "0x12efp-123", ""},
|
||||
{Float, "0xABCD.p+0", "0xABCD.p+0", ""},
|
||||
{Float, "0x.0189P-0", "0x.0189P-0", ""},
|
||||
{Float, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
|
||||
|
||||
{Float, "0x.", "0x.", "hexadecimal literal has no digits"},
|
||||
{Float, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
|
||||
{Float, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
|
||||
{Float, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
|
||||
{Float, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
|
||||
{Float, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
|
||||
{Float, "0x0p", "0x0p", "exponent has no digits"},
|
||||
{Float, "0xeP-", "0xeP-", "exponent has no digits"},
|
||||
{Float, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
|
||||
{Float, "0x1.2p1a", "0x1.2p1 a", ""},
|
||||
|
||||
// separators
|
||||
{Int, "0b_1000_0001", "0b_1000_0001", ""},
|
||||
{Int, "0o_600", "0o_600", ""},
|
||||
{Int, "0_466", "0_466", ""},
|
||||
{Int, "1_000", "1_000", ""},
|
||||
{Float, "1_000.000_1", "1_000.000_1", ""},
|
||||
{Int, "0x_f00d", "0x_f00d", ""},
|
||||
{Float, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
|
||||
|
||||
{Int, "0b__1000", "0b__1000", "'_' must separate successive digits"},
|
||||
{Int, "0o60___0", "0o60___0", "'_' must separate successive digits"},
|
||||
{Int, "0466_", "0466_", "'_' must separate successive digits"},
|
||||
{Float, "1_.", "1_.", "'_' must separate successive digits"},
|
||||
{Float, "0._1", "0._1", "'_' must separate successive digits"},
|
||||
{Float, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
|
||||
{Int, "0x___0", "0x___0", "'_' must separate successive digits"},
|
||||
{Float, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
|
||||
} {
|
||||
s := new(Scanner).Init(strings.NewReader(test.src))
|
||||
var err string
|
||||
s.Error = func(s *Scanner, msg string) {
|
||||
if err == "" {
|
||||
err = msg
|
||||
}
|
||||
}
|
||||
|
||||
for i, want := range strings.Split(test.tokens, " ") {
|
||||
err = ""
|
||||
tok := s.Scan()
|
||||
lit := s.TokenText()
|
||||
if i == 0 {
|
||||
if tok != test.tok {
|
||||
t.Errorf("%q: got token %s; want %s", test.src, TokenString(tok), TokenString(test.tok))
|
||||
}
|
||||
if err != test.err {
|
||||
t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
|
||||
}
|
||||
}
|
||||
if lit != want {
|
||||
t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, TokenString(tok), want)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure we read all
|
||||
if tok := s.Scan(); tok != EOF {
|
||||
t.Errorf("%q: got %s; want EOF", test.src, TokenString(tok))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue30320(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in, want string
|
||||
mode uint
|
||||
}{
|
||||
{"foo01.bar31.xx-0-1-1-0", "01 31 0 1 1 0", ScanInts},
|
||||
{"foo0/12/0/5.67", "0 12 0 5 67", ScanInts},
|
||||
{"xxx1e0yyy", "1 0", ScanInts},
|
||||
{"1_2", "1_2", ScanInts},
|
||||
{"xxx1.0yyy2e3ee", "1 0 2 3", ScanInts},
|
||||
{"xxx1.0yyy2e3ee", "1.0 2e3", ScanFloats},
|
||||
} {
|
||||
got := extractInts(test.in, test.mode)
|
||||
if got != test.want {
|
||||
t.Errorf("%q: got %q; want %q", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func extractInts(t string, mode uint) (res string) {
|
||||
var s Scanner
|
||||
s.Init(strings.NewReader(t))
|
||||
s.Mode = mode
|
||||
for {
|
||||
switch tok := s.Scan(); tok {
|
||||
case Int, Float:
|
||||
if len(res) > 0 {
|
||||
res += " "
|
||||
}
|
||||
res += s.TokenText()
|
||||
case EOF:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue50909(t *testing.T) {
|
||||
var s Scanner
|
||||
s.Init(strings.NewReader("hello \n\nworld\n!\n"))
|
||||
s.IsIdentRune = func(ch rune, _ int) bool { return ch != '\n' }
|
||||
|
||||
r := ""
|
||||
n := 0
|
||||
for s.Scan() != EOF && n < 10 {
|
||||
r += s.TokenText()
|
||||
n++
|
||||
}
|
||||
|
||||
const R = "hello world!"
|
||||
const N = 3
|
||||
if r != R || n != N {
|
||||
t.Errorf("got %q (n = %d); want %q (n = %d)", r, n, R, N)
|
||||
}
|
||||
}
|
||||
73
src/text/tabwriter/example_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tabwriter_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
func ExampleWriter_Init() {
|
||||
w := new(tabwriter.Writer)
|
||||
|
||||
// Format in tab-separated columns with a tab stop of 8.
|
||||
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
|
||||
fmt.Fprintln(w, "a\tb\tc\td\t.")
|
||||
fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.")
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
|
||||
// Format right-aligned in space-separated columns of minimal width 5
|
||||
// and at least one blank of padding (so wider column entries do not
|
||||
// touch each other).
|
||||
w.Init(os.Stdout, 5, 0, 1, ' ', tabwriter.AlignRight)
|
||||
fmt.Fprintln(w, "a\tb\tc\td\t.")
|
||||
fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.")
|
||||
fmt.Fprintln(w)
|
||||
w.Flush()
|
||||
|
||||
// output:
|
||||
// a b c d .
|
||||
// 123 12345 1234567 123456789 .
|
||||
//
|
||||
// a b c d.
|
||||
// 123 12345 1234567 123456789.
|
||||
}
|
||||
|
||||
func Example_elastic() {
|
||||
// Observe how the b's and the d's, despite appearing in the
|
||||
// second cell of each line, belong to different columns.
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, '.', tabwriter.AlignRight|tabwriter.Debug)
|
||||
fmt.Fprintln(w, "a\tb\tc")
|
||||
fmt.Fprintln(w, "aa\tbb\tcc")
|
||||
fmt.Fprintln(w, "aaa\t") // trailing tab
|
||||
fmt.Fprintln(w, "aaaa\tdddd\teeee")
|
||||
w.Flush()
|
||||
|
||||
// output:
|
||||
// ....a|..b|c
|
||||
// ...aa|.bb|cc
|
||||
// ..aaa|
|
||||
// .aaaa|.dddd|eeee
|
||||
}
|
||||
|
||||
func Example_trailingTab() {
|
||||
// Observe that the third line has no trailing tab,
|
||||
// so its final cell is not part of an aligned column.
|
||||
const padding = 3
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, padding, '-', tabwriter.AlignRight|tabwriter.Debug)
|
||||
fmt.Fprintln(w, "a\tb\taligned\t")
|
||||
fmt.Fprintln(w, "aa\tbb\taligned\t")
|
||||
fmt.Fprintln(w, "aaa\tbbb\tunaligned") // no trailing tab
|
||||
fmt.Fprintln(w, "aaaa\tbbbb\taligned\t")
|
||||
w.Flush()
|
||||
|
||||
// output:
|
||||
// ------a|------b|---aligned|
|
||||
// -----aa|-----bb|---aligned|
|
||||
// ----aaa|----bbb|unaligned
|
||||
// ---aaaa|---bbbb|---aligned|
|
||||
}
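// demoMinWidth is an illustrative sketch added in this write-up; it is not
// part of the original example file. It shows how minwidth and padding
// interact: minwidth (10 here) pads even short cells up to ten characters,
// while padding (2) adds extra blanks beyond the widest cell in a column.
func demoMinWidth() {
	w := tabwriter.NewWriter(os.Stdout, 10, 0, 2, ' ', 0)
	fmt.Fprintln(w, "key\tvalue\t")
	fmt.Fprintln(w, "verbose\ttrue\t")
	w.Flush()
}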
|
||||
601
src/text/tabwriter/tabwriter.go
Normal file
@@ -0,0 +1,601 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tabwriter implements a write filter (tabwriter.Writer) that
|
||||
// translates tabbed columns in input into properly aligned text.
|
||||
//
|
||||
// The package is using the Elastic Tabstops algorithm described at
|
||||
// http://nickgravgaard.com/elastictabstops/index.html.
|
||||
//
|
||||
// The text/tabwriter package is frozen and is not accepting new features.
|
||||
package tabwriter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Filter implementation
|
||||
|
||||
// A cell represents a segment of text terminated by tabs or line breaks.
|
||||
// The text itself is stored in a separate buffer; cell only describes the
|
||||
// segment's size in bytes, its width in runes, and whether it's an htab
|
||||
// ('\t') terminated cell.
|
||||
type cell struct {
|
||||
size int // cell size in bytes
|
||||
width int // cell width in runes
|
||||
htab bool // true if the cell is terminated by an htab ('\t')
|
||||
}
|
||||
|
||||
// A Writer is a filter that inserts padding around tab-delimited
|
||||
// columns in its input to align them in the output.
|
||||
//
|
||||
// The Writer treats incoming bytes as UTF-8-encoded text consisting
|
||||
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
|
||||
// and newline ('\n') or formfeed ('\f') characters; both newline and
|
||||
// formfeed act as line breaks.
|
||||
//
|
||||
// Tab-terminated cells in contiguous lines constitute a column. The
|
||||
// Writer inserts padding as needed to make all cells in a column have
|
||||
// the same width, effectively aligning the columns. It assumes that
|
||||
// all characters have the same width, except for tabs for which a
|
||||
// tabwidth must be specified. Column cells must be tab-terminated, not
|
||||
// tab-separated: non-tab terminated trailing text at the end of a line
|
||||
// forms a cell but that cell is not part of an aligned column.
|
||||
// For instance, in this example (where | stands for a horizontal tab):
|
||||
//
|
||||
// aaaa|bbb|d
|
||||
// aa |b |dd
|
||||
// a |
|
||||
// aa |cccc|eee
|
||||
//
|
||||
// the b and c are in distinct columns (the b column is not contiguous
|
||||
// all the way). The d and e are not in a column at all (there's no
|
||||
// terminating tab, nor would the column be contiguous).
|
||||
//
|
||||
// The Writer assumes that all Unicode code points have the same width;
|
||||
// this may not be true in some fonts or if the string contains combining
|
||||
// characters.
|
||||
//
|
||||
// If [DiscardEmptyColumns] is set, empty columns that are terminated
|
||||
// entirely by vertical (or "soft") tabs are discarded. Columns
|
||||
// terminated by horizontal (or "hard") tabs are not affected by
|
||||
// this flag.
|
||||
//
|
||||
// If a Writer is configured to filter HTML, HTML tags and entities
|
||||
// are passed through. The widths of tags and entities are
|
||||
// assumed to be zero (tags) and one (entities) for formatting purposes.
|
||||
//
|
||||
// A segment of text may be escaped by bracketing it with [Escape]
|
||||
// characters. The tabwriter passes escaped text segments through
|
||||
// unchanged. In particular, it does not interpret any tabs or line
|
||||
// breaks within the segment. If the [StripEscape] flag is set, the
|
||||
// Escape characters are stripped from the output; otherwise they
|
||||
// are passed through as well. For the purpose of formatting, the
|
||||
// width of the escaped text is always computed excluding the Escape
|
||||
// characters.
|
||||
//
|
||||
// The formfeed character acts like a newline but it also terminates
|
||||
// all columns in the current line (effectively calling [Writer.Flush]). Tab-
|
||||
// terminated cells in the next line start new columns. Unless found
|
||||
// inside an HTML tag or inside an escaped text segment, formfeed
|
||||
// characters appear as newlines in the output.
|
||||
//
|
||||
// The Writer must buffer input internally, because proper spacing
|
||||
// of one line may depend on the cells in future lines. Clients must
|
||||
// call Flush when done calling [Writer.Write].
|
||||
type Writer struct {
|
||||
// configuration
|
||||
output io.Writer
|
||||
minwidth int
|
||||
tabwidth int
|
||||
padding int
|
||||
padbytes [8]byte
|
||||
flags uint
|
||||
|
||||
// current state
|
||||
buf []byte // collected text excluding tabs or line breaks
|
||||
pos int // buffer position up to which cell.width of incomplete cell has been computed
|
||||
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
|
||||
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
|
||||
lines [][]cell // list of lines; each line is a list of cells
|
||||
widths []int // list of column widths in runes - re-used during formatting
|
||||
}
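// writeAligned is an illustrative sketch, not part of the original source.
// It demonstrates the contract described in the Writer documentation above:
// write tab-terminated cells and newline-terminated lines, then call Flush so
// the buffered lines can be aligned and emitted.
func writeAligned(out io.Writer, rows [][]string) error {
	w := NewWriter(out, 0, 8, 1, ' ', 0)
	for _, row := range rows {
		for _, cell := range row {
			fmt.Fprintf(w, "%s\t", cell) // '\t' terminates the cell
		}
		fmt.Fprintln(w) // '\n' terminates the line
	}
	return w.Flush()
}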
|
||||
|
||||
// addLine adds a new line.
|
||||
// flushed is a hint indicating whether the underlying writer was just flushed.
|
||||
// If so, the previous line is not likely to be a good indicator of the new line's cells.
|
||||
func (b *Writer) addLine(flushed bool) {
|
||||
// Grow slice instead of appending,
|
||||
// as that gives us an opportunity
|
||||
// to re-use an existing []cell.
|
||||
if n := len(b.lines) + 1; n <= cap(b.lines) {
|
||||
b.lines = b.lines[:n]
|
||||
b.lines[n-1] = b.lines[n-1][:0]
|
||||
} else {
|
||||
b.lines = append(b.lines, nil)
|
||||
}
|
||||
|
||||
if !flushed {
|
||||
// The previous line is probably a good indicator
|
||||
// of how many cells the current line will have.
|
||||
// If the current line's capacity is smaller than that,
|
||||
// abandon it and make a new one.
|
||||
if n := len(b.lines); n >= 2 {
|
||||
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
|
||||
b.lines[n-1] = make([]cell, 0, prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the current state.
|
||||
func (b *Writer) reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.pos = 0
|
||||
b.cell = cell{}
|
||||
b.endChar = 0
|
||||
b.lines = b.lines[0:0]
|
||||
b.widths = b.widths[0:0]
|
||||
b.addLine(true)
|
||||
}
|
||||
|
||||
// Internal representation (current state):
|
||||
//
|
||||
// - all text written is appended to buf; tabs and line breaks are stripped away
|
||||
// - at any given time there is a (possibly empty) incomplete cell at the end
|
||||
// (the cell starts after a tab or line break)
|
||||
// - cell.size is the number of bytes belonging to the cell so far
|
||||
// - cell.width is text width in runes of that cell from the start of the cell to
|
||||
// position pos; html tags and entities are excluded from this width if html
|
||||
// filtering is enabled
|
||||
// - the sizes and widths of processed text are kept in the lines list
|
||||
// which contains a list of cells for each line
|
||||
// - the widths list is a temporary list with current widths used during
|
||||
// formatting; it is kept in Writer because it's re-used
|
||||
//
|
||||
// |<---------- size ---------->|
|
||||
// | |
|
||||
// |<- width ->|<- ignored ->| |
|
||||
// | | | |
|
||||
// [---processed---tab------------<tag>...</tag>...]
|
||||
// ^ ^ ^
|
||||
// | | |
|
||||
// buf start of incomplete cell pos
|
||||
|
||||
// Formatting can be controlled with these flags.
|
||||
const (
|
||||
// Ignore html tags and treat entities (starting with '&'
|
||||
// and ending in ';') as single characters (width = 1).
|
||||
FilterHTML uint = 1 << iota
|
||||
|
||||
// Strip Escape characters bracketing escaped text segments
|
||||
// instead of passing them through unchanged with the text.
|
||||
StripEscape
|
||||
|
||||
// Force right-alignment of cell content.
|
||||
// Default is left-alignment.
|
||||
AlignRight
|
||||
|
||||
// Handle empty columns as if they were not present in
|
||||
// the input in the first place.
|
||||
DiscardEmptyColumns
|
||||
|
||||
// Always use tabs for indentation columns (i.e., padding of
|
||||
// leading empty cells on the left) independent of padchar.
|
||||
TabIndent
|
||||
|
||||
// Print a vertical bar ('|') between columns (after formatting).
|
||||
// Discarded columns appear as zero-width columns ("||").
|
||||
Debug
|
||||
)
|
||||
|
||||
// A [Writer] must be initialized with a call to Init. The first parameter (output)
|
||||
// specifies the filter output. The remaining parameters control the formatting:
|
||||
//
|
||||
// minwidth minimal cell width including any padding
|
||||
// tabwidth width of tab characters (equivalent number of spaces)
|
||||
// padding padding added to a cell before computing its width
|
||||
// padchar ASCII char used for padding
|
||||
// if padchar == '\t', the Writer will assume that the
|
||||
// width of a '\t' in the formatted output is tabwidth,
|
||||
// and cells are left-aligned independent of the AlignRight flag
|
||||
// (for correct-looking results, tabwidth must correspond
|
||||
// to the tab width in the viewer displaying the result)
|
||||
// flags formatting control
|
||||
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
if minwidth < 0 || tabwidth < 0 || padding < 0 {
|
||||
panic("negative minwidth, tabwidth, or padding")
|
||||
}
|
||||
b.output = output
|
||||
b.minwidth = minwidth
|
||||
b.tabwidth = tabwidth
|
||||
b.padding = padding
|
||||
for i := range b.padbytes {
|
||||
b.padbytes[i] = padchar
|
||||
}
|
||||
if padchar == '\t' {
|
||||
// tab padding enforces left-alignment
|
||||
flags &^= AlignRight
|
||||
}
|
||||
b.flags = flags
|
||||
|
||||
b.reset()
|
||||
|
||||
return b
|
||||
}
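// initExample is an illustrative sketch, not part of the original source.
// It exercises Init's parameters: minimal cell width 0, tab width 4 (only
// relevant when padding with tabs), one blank of padding, ' ' as the pad
// character, and flags combined with bitwise OR. io.Discard stands in for
// a real destination such as os.Stdout, which is not imported in this file.
func initExample() *Writer {
	return new(Writer).Init(io.Discard, 0, 4, 1, ' ', AlignRight|Debug)
}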
|
||||
|
||||
// debugging support (keep code around)
|
||||
func (b *Writer) dump() {
|
||||
pos := 0
|
||||
for i, line := range b.lines {
|
||||
print("(", i, ") ")
|
||||
for _, c := range line {
|
||||
print("[", string(b.buf[pos:pos+c.size]), "]")
|
||||
pos += c.size
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
|
||||
// local error wrapper so we can distinguish errors we want to return
|
||||
// as errors from genuine panics (which we don't want to return as errors)
|
||||
type osError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *Writer) write0(buf []byte) {
|
||||
n, err := b.output.Write(buf)
|
||||
if n != len(buf) && err == nil {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
panic(osError{err})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Writer) writeN(src []byte, n int) {
|
||||
for n > len(src) {
|
||||
b.write0(src)
|
||||
n -= len(src)
|
||||
}
|
||||
b.write0(src[0:n])
|
||||
}
|
||||
|
||||
var (
|
||||
newline = []byte{'\n'}
|
||||
tabs = []byte("\t\t\t\t\t\t\t\t")
|
||||
)
|
||||
|
||||
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
|
||||
if b.padbytes[0] == '\t' || useTabs {
|
||||
// padding is done with tabs
|
||||
if b.tabwidth == 0 {
|
||||
return // tabs have no width - can't do any padding
|
||||
}
|
||||
// make cellw the smallest multiple of b.tabwidth
|
||||
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
|
||||
n := cellw - textw // amount of padding
|
||||
if n < 0 {
|
||||
panic("internal error")
|
||||
}
|
||||
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
|
||||
return
|
||||
}
|
||||
|
||||
// padding is done with non-tab characters
|
||||
b.writeN(b.padbytes[0:], cellw-textw)
|
||||
}
|
||||
|
||||
var vbar = []byte{'|'}
|
||||
|
||||
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
for i := line0; i < line1; i++ {
|
||||
line := b.lines[i]
|
||||
|
||||
// if TabIndent is set, use tabs to pad leading empty cells
|
||||
useTabs := b.flags&TabIndent != 0
|
||||
|
||||
for j, c := range line {
|
||||
if j > 0 && b.flags&Debug != 0 {
|
||||
// indicate column break
|
||||
b.write0(vbar)
|
||||
}
|
||||
|
||||
if c.size == 0 {
|
||||
// empty cell
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], useTabs)
|
||||
}
|
||||
} else {
|
||||
// non-empty cell
|
||||
useTabs = false
|
||||
if b.flags&AlignRight == 0 { // align left
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
} else { // align right
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i+1 == len(b.lines) {
|
||||
// last buffered line - we don't have a newline, so just write
|
||||
// any outstanding buffered data
|
||||
b.write0(b.buf[pos : pos+b.cell.size])
|
||||
pos += b.cell.size
|
||||
} else {
|
||||
// not the last line - write newline
|
||||
b.write0(newline)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Format the text between line0 and line1 (excluding line1); pos
|
||||
// is the buffer position corresponding to the beginning of line0.
|
||||
// Returns the buffer position corresponding to the beginning of line1.
|
||||
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
column := len(b.widths)
|
||||
for this := line0; this < line1; this++ {
|
||||
line := b.lines[this]
|
||||
|
||||
if column >= len(line)-1 {
|
||||
continue
|
||||
}
|
||||
// cell exists in this column => this line
|
||||
// has more cells than the previous line
|
||||
// (the last cell per line is ignored because cells are
|
||||
// tab-terminated; the last cell per line describes the
|
||||
// text before the newline/formfeed and does not belong
|
||||
// to a column)
|
||||
|
||||
// print unprinted lines until beginning of block
|
||||
pos = b.writeLines(pos, line0, this)
|
||||
line0 = this
|
||||
|
||||
// column block begin
|
||||
width := b.minwidth // minimal column width
|
||||
discardable := true // true if all cells in this column are empty and "soft"
|
||||
for ; this < line1; this++ {
|
||||
line = b.lines[this]
|
||||
if column >= len(line)-1 {
|
||||
break
|
||||
}
|
||||
// cell exists in this column
|
||||
c := line[column]
|
||||
// update width
|
||||
if w := c.width + b.padding; w > width {
|
||||
width = w
|
||||
}
|
||||
// update discardable
|
||||
if c.width > 0 || c.htab {
|
||||
discardable = false
|
||||
}
|
||||
}
|
||||
// column block end
|
||||
|
||||
// discard empty columns if necessary
|
||||
if discardable && b.flags&DiscardEmptyColumns != 0 {
|
||||
width = 0
|
||||
}
|
||||
|
||||
// format and print all columns to the right of this column
|
||||
// (we know the widths of this column and all columns to the left)
|
||||
b.widths = append(b.widths, width) // push width
|
||||
pos = b.format(pos, line0, this)
|
||||
b.widths = b.widths[0 : len(b.widths)-1] // pop width
|
||||
line0 = this
|
||||
}
|
||||
|
||||
// print unprinted lines until end
|
||||
return b.writeLines(pos, line0, line1)
|
||||
}
|
||||
|
||||
// Append text to current cell.
|
||||
func (b *Writer) append(text []byte) {
|
||||
b.buf = append(b.buf, text...)
|
||||
b.cell.size += len(text)
|
||||
}
|
||||
|
||||
// Update the cell width.
|
||||
func (b *Writer) updateWidth() {
|
||||
b.cell.width += utf8.RuneCount(b.buf[b.pos:])
|
||||
b.pos = len(b.buf)
|
||||
}
|
||||
|
||||
// To escape a text segment, bracket it with Escape characters.
|
||||
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
|
||||
// does not terminate a cell and constitutes a single character of
|
||||
// width one for formatting purposes.
|
||||
//
|
||||
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
|
||||
const Escape = '\xff'
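// escapeCell is an illustrative sketch, not part of the original source.
// It brackets s with Escape bytes as described above, so tabs or line breaks
// inside s pass through verbatim instead of terminating a cell. With the
// StripEscape flag set, the brackets are removed again from the output.
func escapeCell(s string) string {
	// Escape must be emitted as the single byte 0xff; converting the rune
	// with string(Escape) would UTF-8 encode it as two bytes.
	return string([]byte{Escape}) + s + string([]byte{Escape})
}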
|
||||
|
||||
// Start escaped mode.
|
||||
func (b *Writer) startEscape(ch byte) {
|
||||
switch ch {
|
||||
case Escape:
|
||||
b.endChar = Escape
|
||||
case '<':
|
||||
b.endChar = '>'
|
||||
case '&':
|
||||
b.endChar = ';'
|
||||
}
|
||||
}
|
||||
|
||||
// Terminate escaped mode. If the escaped text was an HTML tag, its width
|
||||
// is assumed to be zero for formatting purposes; if it was an HTML entity,
|
||||
// its width is assumed to be one. In all other cases, the width is the
|
||||
// unicode width of the text.
|
||||
func (b *Writer) endEscape() {
|
||||
switch b.endChar {
|
||||
case Escape:
|
||||
b.updateWidth()
|
||||
if b.flags&StripEscape == 0 {
|
||||
b.cell.width -= 2 // don't count the Escape chars
|
||||
}
|
||||
case '>': // tag of zero width
|
||||
case ';':
|
||||
b.cell.width++ // entity, count as one rune
|
||||
}
|
||||
b.pos = len(b.buf)
|
||||
b.endChar = 0
|
||||
}
|
||||
|
||||
// Terminate the current cell by adding it to the list of cells of the
|
||||
// current line. Returns the number of cells in that line.
|
||||
func (b *Writer) terminateCell(htab bool) int {
|
||||
b.cell.htab = htab
|
||||
line := &b.lines[len(b.lines)-1]
|
||||
*line = append(*line, b.cell)
|
||||
b.cell = cell{}
|
||||
return len(*line)
|
||||
}
|
||||
|
||||
func (b *Writer) handlePanic(err *error, op string) {
|
||||
if e := recover(); e != nil {
|
||||
if op == "Flush" {
|
||||
// If Flush ran into a panic, we still need to reset.
|
||||
b.reset()
|
||||
}
|
||||
if nerr, ok := e.(osError); ok {
|
||||
*err = nerr.err
|
||||
return
|
||||
}
|
||||
panic(fmt.Sprintf("tabwriter: panic during %s (%v)", op, e))
|
||||
}
|
||||
}
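// checkedFlush is an illustrative sketch, not part of the original source.
// It shows the error path described above: a failing underlying write makes
// write0 panic with an osError, and handlePanic turns that back into the
// error returned from Flush (or Write) instead of letting the panic escape.
func checkedFlush(w *Writer) error {
	if err := w.Flush(); err != nil {
		return fmt.Errorf("tabwriter: flush failed: %w", err)
	}
	return nil
}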
|
||||
|
||||
// Flush should be called after the last call to [Writer.Write] to ensure
|
||||
// that any data buffered in the [Writer] is written to output. Any
|
||||
// incomplete escape sequence at the end is considered
|
||||
// complete for formatting purposes.
|
||||
func (b *Writer) Flush() error {
|
||||
return b.flush()
|
||||
}
|
||||
|
||||
// flush is the internal version of Flush, with a named return value which we
|
||||
// don't want to expose.
|
||||
func (b *Writer) flush() (err error) {
|
||||
defer b.handlePanic(&err, "Flush")
|
||||
b.flushNoDefers()
|
||||
return nil
|
||||
}
|
||||
|
||||
// flushNoDefers is like flush, but without a deferred handlePanic call. This
|
||||
// can be called from other methods which already have their own deferred
|
||||
// handlePanic calls, such as Write, and avoid the extra defer work.
|
||||
func (b *Writer) flushNoDefers() {
|
||||
// add current cell if not empty
|
||||
if b.cell.size > 0 {
|
||||
if b.endChar != 0 {
|
||||
// inside escape - terminate it even if incomplete
|
||||
b.endEscape()
|
||||
}
|
||||
b.terminateCell(false)
|
||||
}
|
||||
|
||||
// format contents of buffer
|
||||
b.format(0, 0, len(b.lines))
|
||||
b.reset()
|
||||
}
|
||||
|
||||
var hbar = []byte("---\n")
|
||||
|
||||
// Write writes buf to the writer b.
|
||||
// The only errors returned are ones encountered
|
||||
// while writing to the underlying output stream.
|
||||
func (b *Writer) Write(buf []byte) (n int, err error) {
|
||||
defer b.handlePanic(&err, "Write")
|
||||
|
||||
// split text into cells
|
||||
n = 0
|
||||
for i, ch := range buf {
|
||||
if b.endChar == 0 {
|
||||
// outside escape
|
||||
switch ch {
|
||||
case '\t', '\v', '\n', '\f':
|
||||
// end of cell
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i + 1 // ch consumed
|
||||
ncells := b.terminateCell(ch == '\t')
|
||||
if ch == '\n' || ch == '\f' {
|
||||
// terminate line
|
||||
b.addLine(ch == '\f')
|
||||
if ch == '\f' || ncells == 1 {
|
||||
// A '\f' always forces a flush. Otherwise, if the previous
// line has only one cell, that cell has no impact on the
// formatting of the following lines (the last cell per line
// is ignored by format()), so we can flush the Writer
// contents.
|
||||
b.flushNoDefers()
|
||||
if ch == '\f' && b.flags&Debug != 0 {
|
||||
// indicate section break
|
||||
b.write0(hbar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case Escape:
|
||||
// start of escaped sequence
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
if b.flags&StripEscape != 0 {
|
||||
n++ // strip Escape
|
||||
}
|
||||
b.startEscape(Escape)
|
||||
|
||||
case '<', '&':
|
||||
// possibly an html tag/entity
|
||||
if b.flags&FilterHTML != 0 {
|
||||
// begin of tag/entity
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
b.startEscape(ch)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// inside escape
|
||||
if ch == b.endChar {
|
||||
// end of tag/entity
|
||||
j := i + 1
|
||||
if ch == Escape && b.flags&StripEscape != 0 {
|
||||
j = i // strip Escape
|
||||
}
|
||||
b.append(buf[n:j])
|
||||
n = i + 1 // ch consumed
|
||||
b.endEscape()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append leftover text
|
||||
b.append(buf[n:])
|
||||
n = len(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewWriter allocates and initializes a new [Writer].
|
||||
// The parameters are the same as for the Init function.
|
||||
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
|
||||
}
|
||||
754
src/text/tabwriter/tabwriter_test.go
Normal file
@@ -0,0 +1,754 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tabwriter_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
. "text/tabwriter"
|
||||
)
|
||||
|
||||
type buffer struct {
|
||||
a []byte
|
||||
}
|
||||
|
||||
func (b *buffer) init(n int) { b.a = make([]byte, 0, n) }
|
||||
|
||||
func (b *buffer) clear() { b.a = b.a[0:0] }
|
||||
|
||||
func (b *buffer) Write(buf []byte) (written int, err error) {
|
||||
n := len(b.a)
|
||||
m := len(buf)
|
||||
if n+m <= cap(b.a) {
|
||||
b.a = b.a[0 : n+m]
|
||||
for i := 0; i < m; i++ {
|
||||
b.a[n+i] = buf[i]
|
||||
}
|
||||
} else {
|
||||
panic("buffer.Write: buffer too small")
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func (b *buffer) String() string { return string(b.a) }
|
||||
|
||||
func write(t *testing.T, testname string, w *Writer, src string) {
|
||||
written, err := io.WriteString(w, src)
|
||||
if err != nil {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
|
||||
}
|
||||
if written != len(src) {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src))
|
||||
}
|
||||
}
|
||||
|
||||
func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
|
||||
err := w.Flush()
|
||||
if err != nil {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
|
||||
}
|
||||
|
||||
res := b.String()
|
||||
if res != expected {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
|
||||
var b buffer
|
||||
b.init(1000)
|
||||
|
||||
var w Writer
|
||||
w.Init(&b, minwidth, tabwidth, padding, padchar, flags)
|
||||
|
||||
// write all at once
|
||||
title := testname + " (written all at once)"
|
||||
b.clear()
|
||||
write(t, title, &w, src)
|
||||
verify(t, title, &w, &b, src, expected)
|
||||
|
||||
// write byte-by-byte
|
||||
title = testname + " (written byte-by-byte)"
|
||||
b.clear()
|
||||
for i := 0; i < len(src); i++ {
|
||||
write(t, title, &w, src[i:i+1])
|
||||
}
|
||||
verify(t, title, &w, &b, src, expected)
|
||||
|
||||
// write using Fibonacci slice sizes
|
||||
title = testname + " (written in fibonacci slices)"
|
||||
b.clear()
|
||||
for i, d := 0, 0; i < len(src); {
|
||||
write(t, title, &w, src[i:i+d])
|
||||
i, d = i+d, d+1
|
||||
if i+d > len(src) {
|
||||
d = len(src) - i
|
||||
}
|
||||
}
|
||||
verify(t, title, &w, &b, src, expected)
|
||||
}
|
||||
|
||||
var tests = []struct {
|
||||
testname string
|
||||
minwidth, tabwidth, padding int
|
||||
padchar byte
|
||||
flags uint
|
||||
src, expected string
|
||||
}{
|
||||
{
|
||||
"1a",
|
||||
8, 0, 1, '.', 0,
|
||||
"",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"1a debug",
|
||||
8, 0, 1, '.', Debug,
|
||||
"",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"1b esc stripped",
|
||||
8, 0, 1, '.', StripEscape,
|
||||
"\xff\xff",
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"1b esc",
|
||||
8, 0, 1, '.', 0,
|
||||
"\xff\xff",
|
||||
"\xff\xff",
|
||||
},
|
||||
|
||||
{
|
||||
"1c esc stripped",
|
||||
8, 0, 1, '.', StripEscape,
|
||||
"\xff\t\xff",
|
||||
"\t",
|
||||
},
|
||||
|
||||
{
|
||||
"1c esc",
|
||||
8, 0, 1, '.', 0,
|
||||
"\xff\t\xff",
|
||||
"\xff\t\xff",
|
||||
},
|
||||
|
||||
{
|
||||
"1d esc stripped",
|
||||
8, 0, 1, '.', StripEscape,
|
||||
"\xff\"foo\t\n\tbar\"\xff",
|
||||
"\"foo\t\n\tbar\"",
|
||||
},
|
||||
|
||||
{
|
||||
"1d esc",
|
||||
8, 0, 1, '.', 0,
|
||||
"\xff\"foo\t\n\tbar\"\xff",
|
||||
"\xff\"foo\t\n\tbar\"\xff",
|
||||
},
|
||||
|
||||
{
|
||||
"1e esc stripped",
|
||||
8, 0, 1, '.', StripEscape,
|
||||
"abc\xff\tdef", // unterminated escape
|
||||
"abc\tdef",
|
||||
},
|
||||
|
||||
{
|
||||
"1e esc",
|
||||
8, 0, 1, '.', 0,
|
||||
"abc\xff\tdef", // unterminated escape
|
||||
"abc\xff\tdef",
|
||||
},
|
||||
|
||||
{
|
||||
"2",
|
||||
8, 0, 1, '.', 0,
|
||||
"\n\n\n",
|
||||
"\n\n\n",
|
||||
},
|
||||
|
||||
{
|
||||
"3",
|
||||
8, 0, 1, '.', 0,
|
||||
"a\nb\nc",
|
||||
"a\nb\nc",
|
||||
},
|
||||
|
||||
{
|
||||
"4a",
|
||||
8, 0, 1, '.', 0,
|
||||
"\t", // '\t' terminates an empty cell on last line - nothing to print
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"4b",
|
||||
8, 0, 1, '.', AlignRight,
|
||||
"\t", // '\t' terminates an empty cell on last line - nothing to print
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"5",
|
||||
8, 0, 1, '.', 0,
|
||||
"*\t*",
|
||||
"*.......*",
|
||||
},
|
||||
|
||||
{
|
||||
"5b",
|
||||
8, 0, 1, '.', 0,
|
||||
"*\t*\n",
|
||||
"*.......*\n",
|
||||
},
|
||||
|
||||
{
|
||||
"5c",
|
||||
8, 0, 1, '.', 0,
|
||||
"*\t*\t",
|
||||
"*.......*",
|
||||
},
|
||||
|
||||
{
|
||||
"5c debug",
|
||||
8, 0, 1, '.', Debug,
|
||||
"*\t*\t",
|
||||
"*.......|*",
|
||||
},
|
||||
|
||||
{
|
||||
"5d",
|
||||
8, 0, 1, '.', AlignRight,
|
||||
"*\t*\t",
|
||||
".......**",
|
||||
},
|
||||
|
||||
{
|
||||
"6",
|
||||
8, 0, 1, '.', 0,
|
||||
"\t\n",
|
||||
"........\n",
|
||||
},
|
||||
|
||||
{
|
||||
"7a",
|
||||
8, 0, 1, '.', 0,
|
||||
"a) foo",
|
||||
"a) foo",
|
||||
},
|
||||
|
||||
{
|
||||
"7b",
|
||||
8, 0, 1, ' ', 0,
|
||||
"b) foo\tbar",
|
||||
"b) foo bar",
|
||||
},
|
||||
|
||||
{
|
||||
"7c",
|
||||
8, 0, 1, '.', 0,
|
||||
"c) foo\tbar\t",
|
||||
"c) foo..bar",
|
||||
},
|
||||
|
||||
{
|
||||
"7d",
|
||||
8, 0, 1, '.', 0,
|
||||
"d) foo\tbar\n",
|
||||
"d) foo..bar\n",
|
||||
},
|
||||
|
||||
{
|
||||
"7e",
|
||||
8, 0, 1, '.', 0,
|
||||
"e) foo\tbar\t\n",
|
||||
"e) foo..bar.....\n",
|
||||
},
|
||||
|
||||
{
|
||||
"7f",
|
||||
8, 0, 1, '.', FilterHTML,
|
||||
"f) f<o\t<b>bar</b>\t\n",
|
||||
"f) f<o..<b>bar</b>.....\n",
|
||||
},
|
||||
|
||||
{
|
||||
"7g",
|
||||
8, 0, 1, '.', FilterHTML,
|
||||
"g) f<o\t<b>bar</b>\t non-terminated entity &",
|
||||
"g) f<o..<b>bar</b>..... non-terminated entity &",
|
||||
},
|
||||
|
||||
{
|
||||
"7g debug",
|
||||
8, 0, 1, '.', FilterHTML | Debug,
|
||||
"g) f<o\t<b>bar</b>\t non-terminated entity &",
|
||||
"g) f<o..|<b>bar</b>.....| non-terminated entity &",
|
||||
},
|
||||
|
||||
{
|
||||
"8",
|
||||
8, 0, 1, '*', 0,
|
||||
"Hello, world!\n",
|
||||
"Hello, world!\n",
|
||||
},
|
||||
|
||||
{
|
||||
"9a",
|
||||
1, 0, 0, '.', 0,
|
||||
"1\t2\t3\t4\n" +
|
||||
"11\t222\t3333\t44444\n",
|
||||
|
||||
"1.2..3...4\n" +
|
||||
"11222333344444\n",
|
||||
},
|
||||
|
||||
{
|
||||
"9b",
|
||||
1, 0, 0, '.', FilterHTML,
|
||||
"1\t2<!---\f--->\t3\t4\n" + // \f inside HTML is ignored
|
||||
"11\t222\t3333\t44444\n",
|
||||
|
||||
"1.2<!---\f--->..3...4\n" +
|
||||
"11222333344444\n",
|
||||
},
|
||||
|
||||
{
|
||||
"9c",
|
||||
1, 0, 0, '.', 0,
|
||||
"1\t2\t3\t4\f" + // \f causes a newline and flush
|
||||
"11\t222\t3333\t44444\n",
|
||||
|
||||
"1234\n" +
|
||||
"11222333344444\n",
|
||||
},
|
||||
|
||||
{
|
||||
"9c debug",
|
||||
1, 0, 0, '.', Debug,
|
||||
"1\t2\t3\t4\f" + // \f causes a newline and flush
|
||||
"11\t222\t3333\t44444\n",
|
||||
|
||||
"1|2|3|4\n" +
|
||||
"---\n" +
|
||||
"11|222|3333|44444\n",
|
||||
},
|
||||
|
||||
{
|
||||
"10a",
|
||||
5, 0, 0, '.', 0,
|
||||
"1\t2\t3\t4\n",
|
||||
"1....2....3....4\n",
|
||||
},
|
||||
|
||||
{
|
||||
"10b",
|
||||
5, 0, 0, '.', 0,
|
||||
"1\t2\t3\t4\t\n",
|
||||
"1....2....3....4....\n",
|
||||
},
|
||||
|
||||
{
|
||||
"11",
|
||||
8, 0, 1, '.', 0,
|
||||
"本\tb\tc\n" +
|
||||
"aa\t\u672c\u672c\u672c\tcccc\tddddd\n" +
|
||||
"aaa\tbbbb\n",
|
||||
|
||||
"本.......b.......c\n" +
|
||||
"aa......本本本.....cccc....ddddd\n" +
|
||||
"aaa.....bbbb\n",
|
||||
},
|
||||
|
||||
{
|
||||
"12a",
|
||||
8, 0, 1, ' ', AlignRight,
|
||||
"a\tè\tc\t\n" +
|
||||
"aa\tèèè\tcccc\tddddd\t\n" +
|
||||
"aaa\tèèèè\t\n",
|
||||
|
||||
" a è c\n" +
|
||||
" aa èèè cccc ddddd\n" +
|
||||
" aaa èèèè\n",
|
||||
},
|
||||
|
||||
{
|
||||
"12b",
|
||||
2, 0, 0, ' ', 0,
|
||||
"a\tb\tc\n" +
|
||||
"aa\tbbb\tcccc\n" +
|
||||
"aaa\tbbbb\n",
|
||||
|
||||
"a b c\n" +
|
||||
"aa bbbcccc\n" +
|
||||
"aaabbbb\n",
|
||||
},
|
||||
|
||||
{
|
||||
"12c",
|
||||
8, 0, 1, '_', 0,
|
||||
"a\tb\tc\n" +
|
||||
"aa\tbbb\tcccc\n" +
|
||||
"aaa\tbbbb\n",
|
||||
|
||||
"a_______b_______c\n" +
|
||||
"aa______bbb_____cccc\n" +
|
||||
"aaa_____bbbb\n",
|
||||
},
|
||||
|
||||
{
|
||||
"13a",
|
||||
4, 0, 1, '-', 0,
|
||||
"4444\t日本語\t22\t1\t333\n" +
|
||||
"999999999\t22\n" +
|
||||
"7\t22\n" +
|
||||
"\t\t\t88888888\n" +
|
||||
"\n" +
|
||||
"666666\t666666\t666666\t4444\n" +
|
||||
"1\t1\t999999999\t0000000000\n",
|
||||
|
||||
"4444------日本語-22--1---333\n" +
|
||||
"999999999-22\n" +
|
||||
"7---------22\n" +
|
||||
"------------------88888888\n" +
|
||||
"\n" +
|
||||
"666666-666666-666666----4444\n" +
|
||||
"1------1------999999999-0000000000\n",
|
||||
},
|
||||
|
||||
{
|
||||
"13b",
|
||||
4, 0, 3, '.', 0,
|
||||
"4444\t333\t22\t1\t333\n" +
|
||||
"999999999\t22\n" +
|
||||
"7\t22\n" +
|
||||
"\t\t\t88888888\n" +
|
||||
"\n" +
|
||||
"666666\t666666\t666666\t4444\n" +
|
||||
"1\t1\t999999999\t0000000000\n",
|
||||
|
||||
"4444........333...22...1...333\n" +
|
||||
"999999999...22\n" +
|
||||
"7...........22\n" +
|
||||
"....................88888888\n" +
|
||||
"\n" +
|
||||
"666666...666666...666666......4444\n" +
|
||||
"1........1........999999999...0000000000\n",
|
||||
},
|
||||
|
||||
{
|
||||
"13c",
|
||||
8, 8, 1, '\t', FilterHTML,
|
||||
"4444\t333\t22\t1\t333\n" +
|
||||
"999999999\t22\n" +
|
||||
"7\t22\n" +
|
||||
"\t\t\t88888888\n" +
|
||||
"\n" +
|
||||
"666666\t666666\t666666\t4444\n" +
|
||||
"1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
|
||||
|
||||
"4444\t\t333\t22\t1\t333\n" +
|
||||
"999999999\t22\n" +
|
||||
"7\t\t22\n" +
|
||||
"\t\t\t\t88888888\n" +
|
||||
"\n" +
|
||||
"666666\t666666\t666666\t\t4444\n" +
|
||||
"1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
|
||||
},
|
||||
|
||||
{
|
||||
"14",
|
||||
1, 0, 2, ' ', AlignRight,
|
||||
".0\t.3\t2.4\t-5.1\t\n" +
|
||||
"23.0\t12345678.9\t2.4\t-989.4\t\n" +
|
||||
"5.1\t12.0\t2.4\t-7.0\t\n" +
|
||||
".0\t0.0\t332.0\t8908.0\t\n" +
|
||||
".0\t-.3\t456.4\t22.1\t\n" +
|
||||
".0\t1.2\t44.4\t-13.3\t\t",
|
||||
|
||||
" .0 .3 2.4 -5.1\n" +
|
||||
" 23.0 12345678.9 2.4 -989.4\n" +
|
||||
" 5.1 12.0 2.4 -7.0\n" +
|
||||
" .0 0.0 332.0 8908.0\n" +
|
||||
" .0 -.3 456.4 22.1\n" +
|
||||
" .0 1.2 44.4 -13.3",
|
||||
},
|
||||
|
||||
{
|
||||
"14 debug",
|
||||
1, 0, 2, ' ', AlignRight | Debug,
|
||||
".0\t.3\t2.4\t-5.1\t\n" +
|
||||
"23.0\t12345678.9\t2.4\t-989.4\t\n" +
|
||||
"5.1\t12.0\t2.4\t-7.0\t\n" +
|
||||
".0\t0.0\t332.0\t8908.0\t\n" +
|
||||
".0\t-.3\t456.4\t22.1\t\n" +
|
||||
".0\t1.2\t44.4\t-13.3\t\t",
|
||||
|
||||
" .0| .3| 2.4| -5.1|\n" +
|
||||
" 23.0| 12345678.9| 2.4| -989.4|\n" +
|
||||
" 5.1| 12.0| 2.4| -7.0|\n" +
|
||||
" .0| 0.0| 332.0| 8908.0|\n" +
|
||||
" .0| -.3| 456.4| 22.1|\n" +
|
||||
" .0| 1.2| 44.4| -13.3|",
|
||||
},
|
||||
|
||||
{
|
||||
"15a",
|
||||
4, 0, 0, '.', 0,
|
||||
"a\t\tb",
|
||||
"a.......b",
|
||||
},
|
||||
|
||||
{
|
||||
"15b",
|
||||
4, 0, 0, '.', DiscardEmptyColumns,
|
||||
"a\t\tb", // htabs - do not discard column
|
||||
"a.......b",
|
||||
},
|
||||
|
||||
{
|
||||
"15c",
|
||||
4, 0, 0, '.', DiscardEmptyColumns,
|
||||
"a\v\vb",
|
||||
"a...b",
|
||||
},
|
||||
|
||||
{
|
||||
"15d",
|
||||
4, 0, 0, '.', AlignRight | DiscardEmptyColumns,
|
||||
"a\v\vb",
|
||||
"...ab",
|
||||
},
|
||||
|
||||
{
|
||||
"16a",
|
||||
100, 100, 0, '\t', 0,
|
||||
"a\tb\t\td\n" +
|
||||
"a\tb\t\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
|
||||
"a\tb\t\td\n" +
|
||||
"a\tb\t\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
},
|
||||
|
||||
{
|
||||
"16b",
|
||||
100, 100, 0, '\t', DiscardEmptyColumns,
|
||||
"a\vb\v\vd\n" +
|
||||
"a\vb\v\vd\ve\n" +
|
||||
"a\n" +
|
||||
"a\vb\vc\vd\n" +
|
||||
"a\vb\vc\vd\ve\n",
|
||||
|
||||
"a\tb\td\n" +
|
||||
"a\tb\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
},
|
||||
|
||||
{
|
||||
"16b debug",
|
||||
100, 100, 0, '\t', DiscardEmptyColumns | Debug,
|
||||
"a\vb\v\vd\n" +
|
||||
"a\vb\v\vd\ve\n" +
|
||||
"a\n" +
|
||||
"a\vb\vc\vd\n" +
|
||||
"a\vb\vc\vd\ve\n",
|
||||
|
||||
"a\t|b\t||d\n" +
|
||||
"a\t|b\t||d\t|e\n" +
|
||||
"a\n" +
|
||||
"a\t|b\t|c\t|d\n" +
|
||||
"a\t|b\t|c\t|d\t|e\n",
|
||||
},
|
||||
|
||||
{
|
||||
"16c",
|
||||
100, 100, 0, '\t', DiscardEmptyColumns,
|
||||
"a\tb\t\td\n" + // hard tabs - do not discard column
|
||||
"a\tb\t\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
|
||||
"a\tb\t\td\n" +
|
||||
"a\tb\t\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
},
|
||||
|
||||
{
|
||||
"16c debug",
|
||||
100, 100, 0, '\t', DiscardEmptyColumns | Debug,
|
||||
"a\tb\t\td\n" + // hard tabs - do not discard column
|
||||
"a\tb\t\td\te\n" +
|
||||
"a\n" +
|
||||
"a\tb\tc\td\n" +
|
||||
"a\tb\tc\td\te\n",
|
||||
|
||||
"a\t|b\t|\t|d\n" +
|
||||
"a\t|b\t|\t|d\t|e\n" +
|
||||
"a\n" +
|
||||
"a\t|b\t|c\t|d\n" +
|
||||
"a\t|b\t|c\t|d\t|e\n",
|
||||
},
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
for _, e := range tests {
|
||||
check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
|
||||
}
|
||||
}
|
||||
|
||||
type panicWriter struct{}
|
||||
|
||||
func (panicWriter) Write([]byte) (int, error) {
|
||||
panic("cannot write")
|
||||
}
|
||||
|
||||
func wantPanicString(t *testing.T, want string) {
|
||||
if e := recover(); e != nil {
|
||||
got, ok := e.(string)
|
||||
switch {
|
||||
case !ok:
|
||||
t.Errorf("got %v (%T), want panic string", e, e)
|
||||
case got != want:
|
||||
t.Errorf("wrong panic message: got %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPanicDuringFlush(t *testing.T) {
|
||||
defer wantPanicString(t, "tabwriter: panic during Flush (cannot write)")
|
||||
var p panicWriter
|
||||
w := new(Writer)
|
||||
w.Init(p, 0, 0, 5, ' ', 0)
|
||||
io.WriteString(w, "a")
|
||||
w.Flush()
|
||||
t.Errorf("failed to panic during Flush")
|
||||
}
|
||||
|
||||
func TestPanicDuringWrite(t *testing.T) {
|
||||
defer wantPanicString(t, "tabwriter: panic during Write (cannot write)")
|
||||
var p panicWriter
|
||||
w := new(Writer)
|
||||
w.Init(p, 0, 0, 5, ' ', 0)
|
||||
io.WriteString(w, "a\n\n") // the second \n triggers a call to w.Write and thus a panic
|
||||
t.Errorf("failed to panic during Write")
|
||||
}
|
||||
|
||||
func BenchmarkTable(b *testing.B) {
|
||||
for _, w := range [...]int{1, 10, 100} {
|
||||
// Build a line with w cells.
|
||||
line := bytes.Repeat([]byte("a\t"), w)
|
||||
line = append(line, '\n')
|
||||
for _, h := range [...]int{10, 1000, 100000} {
|
||||
b.Run(fmt.Sprintf("%dx%d", w, h), func(b *testing.B) {
|
||||
b.Run("new", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
// Write the line h times.
|
||||
for j := 0; j < h; j++ {
|
||||
w.Write(line)
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("reuse", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Write the line h times.
|
||||
for j := 0; j < h; j++ {
|
||||
w.Write(line)
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPyramid(b *testing.B) {
|
||||
for _, x := range [...]int{10, 100, 1000} {
|
||||
// Build a line with x cells.
|
||||
line := bytes.Repeat([]byte("a\t"), x)
|
||||
b.Run(fmt.Sprintf("%d", x), func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
// Write increasing prefixes of that line.
|
||||
for j := 0; j < x; j++ {
|
||||
w.Write(line[:j*2])
|
||||
w.Write([]byte{'\n'})
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRagged(b *testing.B) {
|
||||
var lines [8][]byte
|
||||
for i, w := range [8]int{6, 2, 9, 5, 5, 7, 3, 8} {
|
||||
// Build a line with w cells.
|
||||
lines[i] = bytes.Repeat([]byte("a\t"), w)
|
||||
}
|
||||
for _, h := range [...]int{10, 100, 1000} {
|
||||
b.Run(fmt.Sprintf("%d", h), func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
// Write the lines in turn h times.
|
||||
for j := 0; j < h; j++ {
|
||||
w.Write(lines[j%len(lines)])
|
||||
w.Write([]byte{'\n'})
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const codeSnippet = `
|
||||
some command
|
||||
|
||||
foo # aligned
|
||||
barbaz # comments
|
||||
|
||||
but
|
||||
mostly
|
||||
single
|
||||
cell
|
||||
lines
|
||||
`
|
||||
|
||||
func BenchmarkCode(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
// The code is small, so it's reasonable for the tabwriter user
|
||||
// to write it all at once, or buffer the writes.
|
||||
w.Write([]byte(codeSnippet))
|
||||
w.Flush()
|
||||
}
|
||||
}
|
||||
471
src/text/template/doc.go
Normal file
@@ -0,0 +1,471 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see [html/template], which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel, although if parallel
|
||||
executions share a Writer the output may be interleaved.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Text and spaces
|
||||
|
||||
By default, all text between actions is copied verbatim when the template is
|
||||
executed. For example, the string " items are made of " in the example above
|
||||
appears on standard output when the program is run.
|
||||
|
||||
However, to aid in formatting template source code, if an action's left
|
||||
delimiter (by default "{{") is followed immediately by a minus sign and white
|
||||
space, all trailing white space is trimmed from the immediately preceding text.
|
||||
Similarly, if the right delimiter ("}}") is preceded by white space and a minus
|
||||
sign, all leading white space is trimmed from the immediately following text.
|
||||
In these trim markers, the white space must be present:
|
||||
"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
|
||||
"{{-3}}" parses as an action containing the number -3.
|
||||
|
||||
For instance, when executing the template whose source is
|
||||
|
||||
"{{23 -}} < {{- 45}}"
|
||||
|
||||
the generated output would be
|
||||
|
||||
"23<45"
|
||||
|
||||
For this trimming, the definition of white space characters is the same as in Go:
|
||||
space, horizontal tab, carriage return, and newline.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail in the corresponding sections that follow.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// {{- /* a comment with white space trimmed from preceding and following text */ -}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation (the same as would be
|
||||
printed by fmt.Print) of the value of the pipeline is copied
|
||||
to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order, the elements will be
|
||||
visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{break}}
|
||||
The innermost {{range pipeline}} loop is ended early, stopping the
|
||||
current iteration and bypassing all remaining iterations.
|
||||
|
||||
{{continue}}
|
||||
The current iteration of the innermost {{range pipeline}} loop is
|
||||
stopped, and the loop starts the next iteration.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{block "name" pipeline}} T1 {{end}}
|
||||
A block is shorthand for defining a template
|
||||
{{define "name"}} T1 {{end}}
|
||||
and then executing it in place
|
||||
{{template "name" pipeline}}
|
||||
The typical use is to define a set of root templates that are
|
||||
then customized by redefining the block templates within.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
|
||||
|
||||
{{with pipeline}} T1 {{else with pipeline}} T0 {{end}}
|
||||
To simplify the appearance of with-else chains, the else action
|
||||
of a with may include another with directly; the effect is exactly
|
||||
the same as writing
|
||||
{{with pipeline}} T1 {{else}}{{with pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants. Note that, as in Go, whether a large integer constant
|
||||
overflows when assigned or passed to a function can depend on whether
|
||||
the host machine's ints are 32 or 64 bits.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
field names it does not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one of the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
Variables previously declared can also be assigned, using the syntax
|
||||
|
||||
$variable = pipeline
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
|
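Putting these pieces together, here is a sketch that assumes dot is a
[]string:

	{{$n := len .}}{{range $i, $s := .}}{{$i}} of {{$n}}: {{$s}}{{"\n"}}{{end}}

With dot set to []string{"a", "b"} it prints

	0 of 2: a
	1 of 2: b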
||||
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument. That is,
|
||||
"and x y" behaves as "if x then y else x."
|
||||
Evaluation proceeds through the arguments left to right
|
||||
and returns when the result is determined.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments. This function is unavailable
|
||||
in html/template, with a few exceptions.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
slice
|
||||
slice returns the result of slicing its first argument by the
|
||||
remaining arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2],
|
||||
while "slice x" is x[:], "slice x 1" is x[1:], and "slice x 1 2 3"
|
||||
is x[1:2:3]. The first argument must be a string, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y".
|
||||
Evaluation proceeds through the arguments left to right
|
||||
and returns when the result is determined.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
This function is unavailable in html/template, with a few
|
||||
exceptions.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
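A short sketch combining several of these functions, assuming dot is a
[]string:

	{{print "first=" (index . 0) " rest=" (slice . 1) " n=" (len .)}}

With dot set to []string{"a", "b", "c"} this prints

	first=a rest=[b c] n=3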
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on any values whose type Go defines as
|
||||
comparable. For basic types such as integers, the rules are relaxed:
|
||||
size and exact type are ignored, so any integer value, signed or unsigned,
|
||||
may be compared with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all unsigned integers.)
|
||||
However, as usual, one may not compare an int with a float32 and so on.
|
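For instance (here .Code and .Count are hypothetical fields used only for
illustration),

	{{if eq .Code 301 302 307 308}}redirect{{else}}other{{end}}

prints "redirect" when the integer .Code equals any of the listed values,
while

	{{lt -1 .Count}}

prints "true" for any unsigned .Count, because the arithmetic values are
compared.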
||||
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with [Template.Clone] or [Template.AddParseTree].
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see [ParseFiles], [ParseGlob], [Template.ParseFiles] and [Template.ParseGlob]
|
||||
for simple ways to parse related templates stored in files.
|
||||
|
||||
A template may be executed directly or through [Template.ExecuteTemplate], which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
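Putting this section together, here is a compact sketch (the template name
"letters" and the use of log are illustrative only) that parses the
definitions above and executes them both ways:

	const text = `{{define "T1"}}ONE{{end}}{{define "T2"}}TWO{{end}}` +
		`{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}` +
		`{{template "T3"}}`

	tmpl := template.Must(template.New("letters").Parse(text))
	if err := tmpl.Execute(os.Stdout, nil); err != nil { // prints "ONE TWO"
		log.Fatalf("execution failed: %s", err)
	}
	if err := tmpl.ExecuteTemplate(os.Stdout, "T2", nil); err != nil { // prints "TWO"
		log.Fatalf("execution failed: %s", err)
	}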
||||
|
||||
*/
|
||||
package template
|
||||
110
src/text/template/example_test.go
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func ExampleTemplate() {
|
||||
// Define a template.
|
||||
const letter = `
|
||||
Dear {{.Name}},
|
||||
{{if .Attended}}
|
||||
It was a pleasure to see you at the wedding.
|
||||
{{- else}}
|
||||
It is a shame you couldn't make it to the wedding.
|
||||
{{- end}}
|
||||
{{with .Gift -}}
|
||||
Thank you for the lovely {{.}}.
|
||||
{{end}}
|
||||
Best wishes,
|
||||
Josie
|
||||
`
|
||||
|
||||
// Prepare some data to insert into the template.
|
||||
type Recipient struct {
|
||||
Name, Gift string
|
||||
Attended bool
|
||||
}
|
||||
var recipients = []Recipient{
|
||||
{"Aunt Mildred", "bone china tea set", true},
|
||||
{"Uncle John", "moleskin pants", false},
|
||||
{"Cousin Rodney", "", false},
|
||||
}
|
||||
|
||||
// Create a new template and parse the letter into it.
|
||||
t := template.Must(template.New("letter").Parse(letter))
|
||||
|
||||
// Execute the template for each recipient.
|
||||
for _, r := range recipients {
|
||||
err := t.Execute(os.Stdout, r)
|
||||
if err != nil {
|
||||
log.Println("executing template:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Dear Aunt Mildred,
|
||||
//
|
||||
// It was a pleasure to see you at the wedding.
|
||||
// Thank you for the lovely bone china tea set.
|
||||
//
|
||||
// Best wishes,
|
||||
// Josie
|
||||
//
|
||||
// Dear Uncle John,
|
||||
//
|
||||
// It is a shame you couldn't make it to the wedding.
|
||||
// Thank you for the lovely moleskin pants.
|
||||
//
|
||||
// Best wishes,
|
||||
// Josie
|
||||
//
|
||||
// Dear Cousin Rodney,
|
||||
//
|
||||
// It is a shame you couldn't make it to the wedding.
|
||||
//
|
||||
// Best wishes,
|
||||
// Josie
|
||||
}
|
||||
|
||||
// The following example is duplicated in html/template; keep them in sync.
|
||||
|
||||
func ExampleTemplate_block() {
|
||||
const (
|
||||
master = `Names:{{block "list" .}}{{"\n"}}{{range .}}{{println "-" .}}{{end}}{{end}}`
|
||||
overlay = `{{define "list"}} {{join . ", "}}{{end}} `
|
||||
)
|
||||
var (
|
||||
funcs = template.FuncMap{"join": strings.Join}
|
||||
guardians = []string{"Gamora", "Groot", "Nebula", "Rocket", "Star-Lord"}
|
||||
)
|
||||
masterTmpl, err := template.New("master").Funcs(funcs).Parse(master)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
overlayTmpl, err := template.Must(masterTmpl.Clone()).Parse(overlay)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := masterTmpl.Execute(os.Stdout, guardians); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := overlayTmpl.Execute(os.Stdout, guardians); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Output:
|
||||
// Names:
|
||||
// - Gamora
|
||||
// - Groot
|
||||
// - Nebula
|
||||
// - Rocket
|
||||
// - Star-Lord
|
||||
// Names: Gamora, Groot, Nebula, Rocket, Star-Lord
|
||||
}
|
||||
181
src/text/template/examplefiles_test.go
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// templateFile defines the contents of a template to be stored in a file, for testing.
|
||||
type templateFile struct {
|
||||
name string
|
||||
contents string
|
||||
}
|
||||
|
||||
func createTestDir(files []templateFile) string {
|
||||
dir, err := os.MkdirTemp("", "template")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, file := range files {
|
||||
f, err := os.Create(filepath.Join(dir, file.name))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.WriteString(f, file.contents)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// Here we demonstrate loading a set of templates from a directory.
|
||||
func ExampleTemplate_glob() {
|
||||
// Here we create a temporary directory and populate it with our sample
|
||||
// template definition files; usually the template files would already
|
||||
// exist in some location known to the program.
|
||||
dir := createTestDir([]templateFile{
|
||||
// T0.tmpl is a plain template file that just invokes T1.
|
||||
{"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
|
||||
// T1.tmpl defines a template, T1 that invokes T2.
|
||||
{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
|
||||
// T2.tmpl defines a template T2.
|
||||
{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
|
||||
})
|
||||
// Clean up after the test; another quirk of running as an example.
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// pattern is the glob pattern used to find all the template files.
|
||||
pattern := filepath.Join(dir, "*.tmpl")
|
||||
|
||||
// Here starts the example proper.
|
||||
// T0.tmpl is the first name matched, so it becomes the starting template,
|
||||
// the value returned by ParseGlob.
|
||||
tmpl := template.Must(template.ParseGlob(pattern))
|
||||
|
||||
err := tmpl.Execute(os.Stdout, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("template execution: %s", err)
|
||||
}
|
||||
// Output:
|
||||
// T0 invokes T1: (T1 invokes T2: (This is T2))
|
||||
}
|
||||
|
||||
// This example demonstrates one way to share some templates
|
||||
// and use them in different contexts. In this variant we add multiple driver
|
||||
// templates by hand to an existing bundle of templates.
|
||||
func ExampleTemplate_helpers() {
|
||||
// Here we create a temporary directory and populate it with our sample
|
||||
// template definition files; usually the template files would already
|
||||
// exist in some location known to the program.
|
||||
dir := createTestDir([]templateFile{
|
||||
// T1.tmpl defines a template, T1 that invokes T2.
|
||||
{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
|
||||
// T2.tmpl defines a template T2.
|
||||
{"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
|
||||
})
|
||||
// Clean up after the test; another quirk of running as an example.
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// pattern is the glob pattern used to find all the template files.
|
||||
pattern := filepath.Join(dir, "*.tmpl")
|
||||
|
||||
// Here starts the example proper.
|
||||
// Load the helpers.
|
||||
templates := template.Must(template.ParseGlob(pattern))
|
||||
// Add one driver template to the bunch; we do this with an explicit template definition.
|
||||
_, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
|
||||
if err != nil {
|
||||
log.Fatal("parsing driver1: ", err)
|
||||
}
|
||||
// Add another driver template.
|
||||
_, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
|
||||
if err != nil {
|
||||
log.Fatal("parsing driver2: ", err)
|
||||
}
|
||||
// We load all the templates before execution. This package does not require
|
||||
// that behavior but html/template's escaping does, so it's a good habit.
|
||||
err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
|
||||
if err != nil {
|
||||
log.Fatalf("driver1 execution: %s", err)
|
||||
}
|
||||
err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
|
||||
if err != nil {
|
||||
log.Fatalf("driver2 execution: %s", err)
|
||||
}
|
||||
// Output:
|
||||
// Driver 1 calls T1: (T1 invokes T2: (This is T2))
|
||||
// Driver 2 calls T2: (This is T2)
|
||||
}
|
||||
|
||||
// This example demonstrates how to use one group of driver
|
||||
// templates with distinct sets of helper templates.
|
||||
func ExampleTemplate_share() {
|
||||
// Here we create a temporary directory and populate it with our sample
|
||||
// template definition files; usually the template files would already
|
||||
// exist in some location known to the program.
|
||||
dir := createTestDir([]templateFile{
|
||||
// T0.tmpl is a plain template file that just invokes T1.
|
||||
{"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
|
||||
// T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined
|
||||
{"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
|
||||
})
|
||||
// Clean up after the test; another quirk of running as an example.
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// pattern is the glob pattern used to find all the template files.
|
||||
pattern := filepath.Join(dir, "*.tmpl")
|
||||
|
||||
// Here starts the example proper.
|
||||
// Load the drivers.
|
||||
drivers := template.Must(template.ParseGlob(pattern))
|
||||
|
||||
// We must define an implementation of the T2 template. First we clone
|
||||
// the drivers, then add a definition of T2 to the template name space.
|
||||
|
||||
// 1. Clone the helper set to create a new name space from which to run them.
|
||||
first, err := drivers.Clone()
|
||||
if err != nil {
|
||||
log.Fatal("cloning helpers: ", err)
|
||||
}
|
||||
// 2. Define T2, version A, and parse it.
|
||||
_, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
|
||||
if err != nil {
|
||||
log.Fatal("parsing T2: ", err)
|
||||
}
|
||||
|
||||
// Now repeat the whole thing, using a different version of T2.
|
||||
// 1. Clone the drivers.
|
||||
second, err := drivers.Clone()
|
||||
if err != nil {
|
||||
log.Fatal("cloning drivers: ", err)
|
||||
}
|
||||
// 2. Define T2, version B, and parse it.
|
||||
_, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
|
||||
if err != nil {
|
||||
log.Fatal("parsing T2: ", err)
|
||||
}
|
||||
|
||||
// Execute the templates in the reverse order to verify the
|
||||
// first is unaffected by the second.
|
||||
err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
|
||||
if err != nil {
|
||||
log.Fatalf("second execution: %s", err)
|
||||
}
|
||||
err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
|
||||
if err != nil {
|
||||
log.Fatalf("first: execution: %s", err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
|
||||
// T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
|
||||
}
|
||||
54
src/text/template/examplefunc_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// This example demonstrates a custom function to process template text.
|
||||
// It installs the strings.Title function and uses it to
|
||||
// Make Title Text Look Good In Our Template's Output.
|
||||
func ExampleTemplate_func() {
|
||||
// First we create a FuncMap with which to register the function.
|
||||
funcMap := template.FuncMap{
|
||||
// The name "title" is what the function will be called in the template text.
|
||||
"title": strings.Title,
|
||||
}
|
||||
|
||||
// A simple template definition to test our function.
|
||||
// We print the input text several ways:
|
||||
// - the original
|
||||
// - title-cased
|
||||
// - title-cased and then printed with %q
|
||||
// - printed with %q and then title-cased.
|
||||
const templateText = `
|
||||
Input: {{printf "%q" .}}
|
||||
Output 0: {{title .}}
|
||||
Output 1: {{title . | printf "%q"}}
|
||||
Output 2: {{printf "%q" . | title}}
|
||||
`
|
||||
|
||||
// Create a template, add the function map, and parse the text.
|
||||
tmpl, err := template.New("titleTest").Funcs(funcMap).Parse(templateText)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing: %s", err)
|
||||
}
|
||||
|
||||
// Run the template to verify the output.
|
||||
err = tmpl.Execute(os.Stdout, "the go programming language")
|
||||
if err != nil {
|
||||
log.Fatalf("execution: %s", err)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Input: "the go programming language"
|
||||
// Output 0: The Go Programming Language
|
||||
// Output 1: "The Go Programming Language"
|
||||
// Output 2: "The Go Programming Language"
|
||||
}
|
||||
1075
src/text/template/exec.go
Normal file
File diff suppressed because it is too large
1903
src/text/template/exec_test.go
Normal file
File diff suppressed because it is too large
784
src/text/template/funcs.go
Normal file
@@ -0,0 +1,784 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
//
|
||||
// Errors returned by Execute wrap the underlying error; call [errors.As] to
|
||||
// unwrap them.
|
||||
//
|
||||
// When template execution invokes a function with an argument list, that list
|
||||
// must be assignable to the function's parameter types. Functions meant to
|
||||
// apply to arguments of arbitrary type can use parameters of type interface{} or
|
||||
// of type [reflect.Value]. Similarly, functions meant to return a result of arbitrary
|
||||
// type can return interface{} or [reflect.Value].
|
||||
type FuncMap map[string]any
|
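// For illustration only (this comment block is an addition, not part of the
// original source): a FuncMap entry may use either result shape. A
// hypothetical map mixing both forms might look like:
//
//	template.FuncMap{
//		"upper": strings.ToUpper,                                        // one result
//		"atoi":  func(s string) (int, error) { return strconv.Atoi(s) }, // value plus error
//	}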
||||
|
||||
// builtins returns the FuncMap.
|
||||
// It is not a global variable so the linker can dead code eliminate
|
||||
// more when this isn't called. See golang.org/issue/36021.
|
||||
// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
|
||||
func builtins() FuncMap {
|
||||
return FuncMap{
|
||||
"and": and,
|
||||
"call": emptyCall,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"slice": slice,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
}
|
||||
|
||||
var builtinFuncsOnce struct {
|
||||
sync.Once
|
||||
v map[string]reflect.Value
|
||||
}
|
||||
|
||||
// builtinFuncsOnce lazily computes & caches the builtinFuncs map.
|
||||
// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
|
||||
func builtinFuncs() map[string]reflect.Value {
|
||||
builtinFuncsOnce.Do(func() {
|
||||
builtinFuncsOnce.v = createValueFuncs(builtins())
|
||||
})
|
||||
return builtinFuncsOnce.v
|
||||
}
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
if !goodName(name) {
|
||||
panic(fmt.Errorf("function name %q is not a valid identifier", name))
|
||||
}
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if err := goodFunc(name, v.Type()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc reports whether the function or method has the right result signature.
|
||||
func goodFunc(name string, typ reflect.Type) error {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch numOut := typ.NumOut(); {
|
||||
case numOut == 1:
|
||||
return nil
|
||||
case numOut == 2 && typ.Out(1) == errorType:
|
||||
return nil
|
||||
case numOut == 2:
|
||||
return fmt.Errorf("invalid function signature for %s: second return value should be error; is %s", name, typ.Out(1))
|
||||
default:
|
||||
return fmt.Errorf("function %s has %d return values; should be 1 or 2", name, typ.NumOut())
|
||||
}
|
||||
}
|
||||
|
||||
// goodName reports whether the function name is a valid identifier.
|
||||
func goodName(name string) bool {
|
||||
if name == "" {
|
||||
return false
|
||||
}
|
||||
for i, r := range name {
|
||||
switch {
|
||||
case r == '_':
|
||||
case i == 0 && !unicode.IsLetter(r):
|
||||
return false
|
||||
case !unicode.IsLetter(r) && !unicode.IsDigit(r):
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
tmpl.muFuncs.RLock()
|
||||
defer tmpl.muFuncs.RUnlock()
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, false, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs()[name]; fn.IsValid() {
|
||||
return fn, true, true
|
||||
}
|
||||
return reflect.Value{}, false, false
|
||||
}
|
||||
|
||||
// prepareArg checks if value can be used as an argument of type argType, and
|
||||
// converts an invalid value to appropriate zero if possible.
|
||||
func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
|
||||
if !value.IsValid() {
|
||||
if !canBeNil(argType) {
|
||||
return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
|
||||
}
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if value.Type().AssignableTo(argType) {
|
||||
return value, nil
|
||||
}
|
||||
if intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType) {
|
||||
value = value.Convert(argType)
|
||||
return value, nil
|
||||
}
|
||||
return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
|
||||
}
|
||||
|
||||
func intLike(typ reflect.Kind) bool {
|
||||
switch typ {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
|
||||
func indexArg(index reflect.Value, cap int) (int, error) {
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
case reflect.Invalid:
|
||||
return 0, fmt.Errorf("cannot index slice/array with nil")
|
||||
default:
|
||||
return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || int(x) < 0 || int(x) > cap {
|
||||
return 0, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
return int(x), nil
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
|
||||
item = indirectInterface(item)
|
||||
if !item.IsValid() {
|
||||
return reflect.Value{}, fmt.Errorf("index of untyped nil")
|
||||
}
|
||||
for _, index := range indexes {
|
||||
index = indirectInterface(index)
|
||||
var isNil bool
|
||||
if item, isNil = indirect(item); isNil {
|
||||
return reflect.Value{}, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch item.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
x, err := indexArg(index, item.Len())
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
item = item.Index(x)
|
||||
case reflect.Map:
|
||||
index, err := prepareArg(index, item.Type().Key())
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
if x := item.MapIndex(index); x.IsValid() {
|
||||
item = x
|
||||
} else {
|
||||
item = reflect.Zero(item.Type().Elem())
|
||||
}
|
||||
case reflect.Invalid:
|
||||
// the loop holds invariant: item.IsValid()
|
||||
panic("unreachable")
|
||||
default:
|
||||
return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
|
||||
}
|
||||
}
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// Slicing.
|
||||
|
||||
// slice returns the result of slicing its first argument by the remaining
|
||||
// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
|
||||
// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
|
||||
// argument must be a string, slice, or array.
|
||||
func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
|
||||
item = indirectInterface(item)
|
||||
if !item.IsValid() {
|
||||
return reflect.Value{}, fmt.Errorf("slice of untyped nil")
|
||||
}
|
||||
if len(indexes) > 3 {
|
||||
return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
|
||||
}
|
||||
var cap int
|
||||
switch item.Kind() {
|
||||
case reflect.String:
|
||||
if len(indexes) == 3 {
|
||||
return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
|
||||
}
|
||||
cap = item.Len()
|
||||
case reflect.Array, reflect.Slice:
|
||||
cap = item.Cap()
|
||||
default:
|
||||
return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
|
||||
}
|
||||
// set default values for cases item[:], item[i:].
|
||||
idx := [3]int{0, item.Len()}
|
||||
for i, index := range indexes {
|
||||
x, err := indexArg(index, cap)
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
idx[i] = x
|
||||
}
|
||||
// given item[i:j], make sure i <= j.
|
||||
if idx[0] > idx[1] {
|
||||
return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
|
||||
}
|
||||
if len(indexes) < 3 {
|
||||
return item.Slice(idx[0], idx[1]), nil
|
||||
}
|
||||
// given item[i:j:k], make sure i <= j <= k.
|
||||
if idx[1] > idx[2] {
|
||||
return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
|
||||
}
|
||||
return item.Slice3(idx[0], idx[1], idx[2]), nil
|
||||
}
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item reflect.Value) (int, error) {
|
||||
item, isNil := indirect(item)
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch item.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return item.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", item.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
func emptyCall(fn reflect.Value, args ...reflect.Value) reflect.Value {
|
||||
panic("unreachable") // implemented as a special case in evalCall
|
||||
}
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(name string, fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
|
||||
fn = indirectInterface(fn)
|
||||
if !fn.IsValid() {
|
||||
return reflect.Value{}, fmt.Errorf("call of nil")
|
||||
}
|
||||
typ := fn.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return reflect.Value{}, fmt.Errorf("non-function %s of type %s", name, typ)
|
||||
}
|
||||
|
||||
if err := goodFunc(name, typ); err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return reflect.Value{}, fmt.Errorf("wrong number of args for %s: got %d want at least %d", name, len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return reflect.Value{}, fmt.Errorf("wrong number of args for %s: got %d want %d", name, len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
arg = indirectInterface(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
argType := dddType
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
}
|
||||
|
||||
var err error
|
||||
if argv[i], err = prepareArg(arg, argType); err != nil {
|
||||
return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
return safeCall(fn, argv)
|
||||
}
|
||||
|
||||
// safeCall runs fun.Call(args), and returns the resulting value and error, if
|
||||
// any. If the call panics, the panic value is returned as an error.
|
||||
func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if e, ok := r.(error); ok {
|
||||
err = e
|
||||
} else {
|
||||
err = fmt.Errorf("%v", r)
|
||||
}
|
||||
}
|
||||
}()
|
||||
ret := fun.Call(args)
|
||||
if len(ret) == 2 && !ret[1].IsNil() {
|
||||
return ret[0], ret[1].Interface().(error)
|
||||
}
|
||||
return ret[0], nil
|
||||
}
|
||||
|
||||
// Boolean logic.
|
||||
|
||||
func truth(arg reflect.Value) bool {
|
||||
t, _ := isTrue(indirectInterface(arg))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
|
||||
panic("unreachable") // implemented as a special case in evalCall
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
|
||||
panic("unreachable") // implemented as a special case in evalCall
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg reflect.Value) bool {
|
||||
return !truth(arg)
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// isNil returns true if v is the zero reflect.Value, or nil of its type.
|
||||
func isNil(v reflect.Value) bool {
|
||||
if !v.IsValid() {
|
||||
return true
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// canCompare reports whether v1 and v2 are both the same kind, or one is nil.
|
||||
// Called only when dealing with nillable types, or there's about to be an error.
|
||||
func canCompare(v1, v2 reflect.Value) bool {
|
||||
k1 := v1.Kind()
|
||||
k2 := v2.Kind()
|
||||
if k1 == k2 {
|
||||
return true
|
||||
}
|
||||
// We know the type can be compared to nil.
|
||||
return k1 == reflect.Invalid || k2 == reflect.Invalid
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
|
||||
arg1 = indirectInterface(arg1)
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
k1, _ := basicKind(arg1)
|
||||
for _, arg := range arg2 {
|
||||
arg = indirectInterface(arg)
|
||||
k2, _ := basicKind(arg)
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
|
||||
default:
|
||||
if arg1.IsValid() && arg.IsValid() {
|
||||
return false, errBadComparison
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = arg1.Bool() == arg.Bool()
|
||||
case complexKind:
|
||||
truth = arg1.Complex() == arg.Complex()
|
||||
case floatKind:
|
||||
truth = arg1.Float() == arg.Float()
|
||||
case intKind:
|
||||
truth = arg1.Int() == arg.Int()
|
||||
case stringKind:
|
||||
truth = arg1.String() == arg.String()
|
||||
case uintKind:
|
||||
truth = arg1.Uint() == arg.Uint()
|
||||
default:
|
||||
if !canCompare(arg1, arg) {
|
||||
return false, fmt.Errorf("non-comparable types %s: %v, %s: %v", arg1, arg1.Type(), arg.Type(), arg)
|
||||
}
|
||||
if isNil(arg1) || isNil(arg) {
|
||||
truth = isNil(arg) == isNil(arg1)
|
||||
} else {
|
||||
if !arg.Type().Comparable() {
|
||||
return false, fmt.Errorf("non-comparable type %s: %v", arg, arg.Type())
|
||||
}
|
||||
truth = arg1.Interface() == arg.Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 reflect.Value) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 reflect.Value) (bool, error) {
|
||||
arg1 = indirectInterface(arg1)
|
||||
k1, err := basicKind(arg1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
arg2 = indirectInterface(arg2)
|
||||
k2, err := basicKind(arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = arg1.Float() < arg2.Float()
|
||||
case intKind:
|
||||
truth = arg1.Int() < arg2.Int()
|
||||
case stringKind:
|
||||
truth = arg1.String() < arg2.String()
|
||||
case uintKind:
|
||||
truth = arg1.Uint() < arg2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 reflect.Value) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 reflect.Value) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 reflect.Value) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
|
||||
// HTML escaping.
|
||||
|
||||
var (
|
||||
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp  = []byte("&amp;")
htmlLt   = []byte("&lt;")
htmlGt   = []byte("&gt;")
|
||||
htmlNull = []byte("\uFFFD")
|
||||
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '\000':
|
||||
html = htmlNull
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if !strings.ContainsAny(s, "'\"&<>\000") {
|
||||
return s
|
||||
}
|
||||
var b strings.Builder
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...any) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\u003C`)
|
||||
jsGt = []byte(`\u003E`)
|
||||
jsAmp = []byte(`\u0026`)
|
||||
jsEq = []byte(`\u003D`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
case '&':
|
||||
w.Write(jsAmp)
|
||||
case '=':
|
||||
w.Write(jsEq)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b strings.Builder
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>', '&', '=':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...any) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...any) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
//
|
||||
// fmt.Sprint(args...)
|
||||
//
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []any) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
178
src/text/template/helper.go
Normal file
@@ -0,0 +1,178 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning ([*Template], error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
//
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new [Template] and parses the template definitions from
|
||||
// the named files. The returned template's name will have the base name and
|
||||
// parsed contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
//
|
||||
// When parsing multiple files with the same name in different directories,
|
||||
// the last one mentioned will be the one that results.
|
||||
// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
|
||||
// named "foo", while "a/foo" is unavailable.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, readFileOS, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
// Since the templates created by ParseFiles are named by the base
|
||||
// (see [filepath.Base]) names of the argument files, t should usually have the
|
||||
// name of one of the (base) names of the files. If it does not, depending on
|
||||
// t's contents before calling ParseFiles, t.Execute may fail. In that
|
||||
// case use t.ExecuteTemplate to execute a valid template.
|
||||
//
|
||||
// When parsing multiple files with the same name in different directories,
|
||||
// the last one mentioned will be the one that results.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
t.init()
|
||||
return parseFiles(t, readFileOS, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
name, b, err := readFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new [Template] and parses the template definitions from
|
||||
// the files identified by the pattern. The files are matched according to the
|
||||
// semantics of [filepath.Match], and the pattern must match at least one file.
|
||||
// The returned template will have the [filepath.Base] name and (parsed)
|
||||
// contents of the first file matched by the pattern. ParseGlob is equivalent to
|
||||
// calling [ParseFiles] with the list of files matched by the pattern.
|
||||
//
|
||||
// When parsing multiple files with the same name in different directories,
|
||||
// the last one mentioned will be the one that results.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The files are matched
|
||||
// according to the semantics of [filepath.Match], and the pattern must match at
|
||||
// least one file. ParseGlob is equivalent to calling [Template.ParseFiles] with
|
||||
// the list of files matched by the pattern.
|
||||
//
|
||||
// When parsing multiple files with the same name in different directories,
|
||||
// the last one mentioned will be the one that results.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
t.init()
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, readFileOS, filenames...)
|
||||
}
|
||||
|
||||
// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys
|
||||
// instead of the host operating system's file system.
|
||||
// It accepts a list of glob patterns (see [path.Match]).
|
||||
// (Note that most file names serve as glob patterns matching only themselves.)
|
||||
func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
|
||||
return parseFS(nil, fsys, patterns)
|
||||
}
|
||||
|
||||
// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys
|
||||
// instead of the host operating system's file system.
|
||||
// It accepts a list of glob patterns (see [path.Match]).
|
||||
// (Note that most file names serve as glob patterns matching only themselves.)
|
||||
func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
|
||||
t.init()
|
||||
return parseFS(t, fsys, patterns)
|
||||
}
|
||||
|
||||
func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
|
||||
var filenames []string
|
||||
for _, pattern := range patterns {
|
||||
list, err := fs.Glob(fsys, pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(list) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
filenames = append(filenames, list...)
|
||||
}
|
||||
return parseFiles(t, readFileFS(fsys), filenames...)
|
||||
}
|
||||
|
||||
func readFileOS(file string) (name string, b []byte, err error) {
|
||||
name = filepath.Base(file)
|
||||
b, err = os.ReadFile(file)
|
||||
return
|
||||
}
|
||||
|
||||
func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
|
||||
return func(file string) (name string, b []byte, err error) {
|
||||
name = path.Base(file)
|
||||
b, err = fs.ReadFile(fsys, file)
|
||||
return
|
||||
}
|
||||
}
|
||||
59
src/text/template/link_test.go
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package template_test

import (
	"bytes"
	"internal/testenv"
	"os"
	"os/exec"
	"path/filepath"
	"testing"
)

// Issue 36021: verify that text/template doesn't prevent the linker from removing
// unused methods.
func TestLinkerGC(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	testenv.MustHaveGoBuild(t)
	const prog = `package main

import (
	_ "text/template"
)

type T struct{}

func (t *T) Unused() { println("THIS SHOULD BE ELIMINATED") }
func (t *T) Used() {}

var sink *T

func main() {
	var t T
	sink = &t
	t.Used()
}
`
	td := t.TempDir()

	if err := os.WriteFile(filepath.Join(td, "x.go"), []byte(prog), 0644); err != nil {
		t.Fatal(err)
	}
	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "x.exe", "x.go")
	cmd.Dir = td
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("go build: %v, %s", err, out)
	}
	slurp, err := os.ReadFile(filepath.Join(td, "x.exe"))
	if err != nil {
		t.Fatal(err)
	}
	if bytes.Contains(slurp, []byte("THIS SHOULD BE ELIMINATED")) {
		t.Error("binary contains code that should be deadcode eliminated")
	}
}
464
src/text/template/multi_test.go
Normal file
@@ -0,0 +1,464 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
// Tests for multiple-template parsing and execution.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/template/parse"
|
||||
)
|
||||
|
||||
const (
|
||||
noError = true
|
||||
hasError = false
|
||||
)
|
||||
|
||||
type multiParseTest struct {
|
||||
name string
|
||||
input string
|
||||
ok bool
|
||||
names []string
|
||||
results []string
|
||||
}
|
||||
|
||||
var multiParseTests = []multiParseTest{
|
||||
{"empty", "", noError,
|
||||
nil,
|
||||
nil},
|
||||
{"one", `{{define "foo"}} FOO {{end}}`, noError,
|
||||
[]string{"foo"},
|
||||
[]string{" FOO "}},
|
||||
{"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
|
||||
[]string{"foo", "bar"},
|
||||
[]string{" FOO ", " BAR "}},
|
||||
// errors
|
||||
{"missing end", `{{define "foo"}} FOO `, hasError,
|
||||
nil,
|
||||
nil},
|
||||
{"malformed name", `{{define "foo}} FOO `, hasError,
|
||||
nil,
|
||||
nil},
|
||||
}
|
||||
|
||||
func TestMultiParse(t *testing.T) {
|
||||
for _, test := range multiParseTests {
|
||||
template, err := New("root").Parse(test.input)
|
||||
switch {
|
||||
case err == nil && !test.ok:
|
||||
t.Errorf("%q: expected error; got none", test.name)
|
||||
continue
|
||||
case err != nil && test.ok:
|
||||
t.Errorf("%q: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
case err != nil && !test.ok:
|
||||
// expected error, got one
|
||||
if *debug {
|
||||
fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if template == nil {
|
||||
continue
|
||||
}
|
||||
if len(template.tmpl) != len(test.names)+1 { // +1 for root
|
||||
t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl))
|
||||
continue
|
||||
}
|
||||
for i, name := range test.names {
|
||||
tmpl, ok := template.tmpl[name]
|
||||
if !ok {
|
||||
t.Errorf("%s: can't find template %q", test.name, name)
|
||||
continue
|
||||
}
|
||||
result := tmpl.Root.String()
|
||||
if result != test.results[i] {
|
||||
t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var multiExecTests = []execTest{
|
||||
{"empty", "", "", nil, true},
|
||||
{"text", "some text", "some text", nil, true},
|
||||
{"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
|
||||
{"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
|
||||
{"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
|
||||
{"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
|
||||
{"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
|
||||
{"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
|
||||
{"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
|
||||
|
||||
// User-defined function: test argument evaluator.
|
||||
{"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
|
||||
{"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
|
||||
}
|
||||
|
||||
// These strings are also in testdata/*.
|
||||
const multiText1 = `
|
||||
{{define "x"}}TEXT{{end}}
|
||||
{{define "dotV"}}{{.V}}{{end}}
|
||||
`
|
||||
|
||||
const multiText2 = `
|
||||
{{define "dot"}}{{.}}{{end}}
|
||||
{{define "nested"}}{{template "dot" .}}{{end}}
|
||||
`
|
||||
|
||||
func TestMultiExecute(t *testing.T) {
|
||||
// Declare a couple of templates first.
|
||||
template, err := New("root").Parse(multiText1)
|
||||
if err != nil {
|
||||
t.Fatalf("parse error for 1: %s", err)
|
||||
}
|
||||
_, err = template.Parse(multiText2)
|
||||
if err != nil {
|
||||
t.Fatalf("parse error for 2: %s", err)
|
||||
}
|
||||
testExecute(multiExecTests, template, t)
|
||||
}
|
||||
|
||||
func TestParseFiles(t *testing.T) {
|
||||
_, err := ParseFiles("DOES NOT EXIST")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent file; got none")
|
||||
}
|
||||
template := New("root")
|
||||
_, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(multiExecTests, template, t)
|
||||
}
|
||||
|
||||
func TestParseGlob(t *testing.T) {
|
||||
_, err := ParseGlob("DOES NOT EXIST")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent file; got none")
|
||||
}
|
||||
_, err = New("error").ParseGlob("[x")
|
||||
if err == nil {
|
||||
t.Error("expected error for bad pattern; got none")
|
||||
}
|
||||
template := New("root")
|
||||
_, err = template.ParseGlob("testdata/file*.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(multiExecTests, template, t)
|
||||
}
|
||||
|
||||
func TestParseFS(t *testing.T) {
|
||||
fs := os.DirFS("testdata")
|
||||
|
||||
{
|
||||
_, err := ParseFS(fs, "DOES NOT EXIST")
|
||||
if err == nil {
|
||||
t.Error("expected error for non-existent file; got none")
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
template := New("root")
|
||||
_, err := template.ParseFS(fs, "file1.tmpl", "file2.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(multiExecTests, template, t)
|
||||
}
|
||||
|
||||
{
|
||||
template := New("root")
|
||||
_, err := template.ParseFS(fs, "file*.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(multiExecTests, template, t)
|
||||
}
|
||||
}
|
||||
|
||||
// In these tests, actual content (not just template definitions) comes from the parsed files.
|
||||
|
||||
var templateFileExecTests = []execTest{
|
||||
{"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
|
||||
}
|
||||
|
||||
func TestParseFilesWithData(t *testing.T) {
|
||||
template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(templateFileExecTests, template, t)
|
||||
}
|
||||
|
||||
func TestParseGlobWithData(t *testing.T) {
|
||||
template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing files: %v", err)
|
||||
}
|
||||
testExecute(templateFileExecTests, template, t)
|
||||
}
|
||||
|
||||
const (
|
||||
cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
|
||||
cloneText2 = `{{define "b"}}b{{end}}`
|
||||
cloneText3 = `{{define "c"}}root{{end}}`
|
||||
cloneText4 = `{{define "c"}}clone{{end}}`
|
||||
)
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
// Create some templates and clone the root.
|
||||
root, err := New("root").Parse(cloneText1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = root.Parse(cloneText2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clone := Must(root.Clone())
|
||||
// Add variants to both.
|
||||
_, err = root.Parse(cloneText3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = clone.Parse(cloneText4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Verify that the clone is self-consistent.
|
||||
for k, v := range clone.tmpl {
|
||||
if k == clone.name && v.tmpl[k] != clone {
|
||||
t.Error("clone does not contain root")
|
||||
}
|
||||
if v != v.tmpl[v.name] {
|
||||
t.Errorf("clone does not contain self for %q", k)
|
||||
}
|
||||
}
|
||||
// Execute root.
|
||||
var b strings.Builder
|
||||
err = root.ExecuteTemplate(&b, "a", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.String() != "broot" {
|
||||
t.Errorf("expected %q got %q", "broot", b.String())
|
||||
}
|
||||
// Execute copy.
|
||||
b.Reset()
|
||||
err = clone.ExecuteTemplate(&b, "a", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.String() != "bclone" {
|
||||
t.Errorf("expected %q got %q", "bclone", b.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddParseTree(t *testing.T) {
|
||||
// Create some templates.
|
||||
root, err := New("root").Parse(cloneText1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = root.Parse(cloneText2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Add a new parse tree.
|
||||
tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
added, err := root.AddParseTree("c", tree["c"])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Execute.
|
||||
var b strings.Builder
|
||||
err = added.ExecuteTemplate(&b, "a", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.String() != "broot" {
|
||||
t.Errorf("expected %q got %q", "broot", b.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 7032
|
||||
func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
|
||||
master := "{{define \"master\"}}{{end}}"
|
||||
tmpl := New("master")
|
||||
tree, err := parse.Parse("master", master, "", "", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected parse err: %v", err)
|
||||
}
|
||||
masterTree := tree["master"]
|
||||
tmpl.AddParseTree("master", masterTree) // used to panic
|
||||
}
|
||||
|
||||
func TestRedefinition(t *testing.T) {
|
||||
var tmpl *Template
|
||||
var err error
|
||||
if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
|
||||
t.Fatalf("parse 1: %v", err)
|
||||
}
|
||||
if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err != nil {
|
||||
t.Fatalf("got error %v, expected nil", err)
|
||||
}
|
||||
if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err != nil {
|
||||
t.Fatalf("got error %v, expected nil", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 10879
|
||||
func TestEmptyTemplateCloneCrash(t *testing.T) {
|
||||
t1 := New("base")
|
||||
t1.Clone() // used to panic
|
||||
}
|
||||
|
||||
// Issue 10910, 10926
|
||||
func TestTemplateLookUp(t *testing.T) {
|
||||
t1 := New("foo")
|
||||
if t1.Lookup("foo") != nil {
|
||||
t.Error("Lookup returned non-nil value for undefined template foo")
|
||||
}
|
||||
t1.New("bar")
|
||||
if t1.Lookup("bar") != nil {
|
||||
t.Error("Lookup returned non-nil value for undefined template bar")
|
||||
}
|
||||
t1.Parse(`{{define "foo"}}test{{end}}`)
|
||||
if t1.Lookup("foo") == nil {
|
||||
t.Error("Lookup returned nil value for defined template")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
// template with same name already exists
|
||||
t1, _ := New("test").Parse(`{{define "test"}}foo{{end}}`)
|
||||
t2 := t1.New("test")
|
||||
|
||||
if t1.common != t2.common {
|
||||
t.Errorf("t1 & t2 didn't share common struct; got %v != %v", t1.common, t2.common)
|
||||
}
|
||||
if t1.Tree == nil {
|
||||
t.Error("defined template got nil Tree")
|
||||
}
|
||||
if t2.Tree != nil {
|
||||
t.Error("undefined template got non-nil Tree")
|
||||
}
|
||||
|
||||
containsT1 := false
|
||||
for _, tmpl := range t1.Templates() {
|
||||
if tmpl == t2 {
|
||||
t.Error("Templates included undefined template")
|
||||
}
|
||||
if tmpl == t1 {
|
||||
containsT1 = true
|
||||
}
|
||||
}
|
||||
if !containsT1 {
|
||||
t.Error("Templates didn't include defined template")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
// In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions
|
||||
t1 := New("test")
|
||||
if _, err := t1.Parse(`{{define "test"}}{{end}}`); err != nil {
|
||||
t.Fatalf("parsing test: %s", err)
|
||||
}
|
||||
if _, err := t1.Parse(`{{define "test"}}{{/* this is a comment */}}{{end}}`); err != nil {
|
||||
t.Fatalf("parsing test: %s", err)
|
||||
}
|
||||
if _, err := t1.Parse(`{{define "test"}}foo{{end}}`); err != nil {
|
||||
t.Fatalf("parsing test: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyTemplate(t *testing.T) {
|
||||
cases := []struct {
|
||||
defn []string
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{[]string{"x", "y"}, "", "y"},
|
||||
{[]string{""}, "once", ""},
|
||||
{[]string{"", ""}, "twice", ""},
|
||||
{[]string{"{{.}}", "{{.}}"}, "twice", "twice"},
|
||||
{[]string{"{{/* a comment */}}", "{{/* a comment */}}"}, "comment", ""},
|
||||
{[]string{"{{.}}", ""}, "twice", ""},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
root := New("root")
|
||||
|
||||
var (
|
||||
m *Template
|
||||
err error
|
||||
)
|
||||
for _, d := range c.defn {
|
||||
m, err = root.New(c.in).Parse(d)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
buf := &strings.Builder{}
|
||||
if err := m.Execute(buf, c.in); err != nil {
|
||||
t.Error(i, err)
|
||||
continue
|
||||
}
|
||||
if buf.String() != c.want {
|
||||
t.Errorf("expected string %q: got %q", c.want, buf.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 19249 was a regression in 1.8 caused by the handling of empty
|
||||
// templates added in that release, which got different answers depending
|
||||
// on the order templates appeared in the internal map.
|
||||
func TestIssue19294(t *testing.T) {
|
||||
// The empty block in "xhtml" should be replaced during execution
|
||||
// by the contents of "stylesheet", but if the internal map associating
|
||||
// names with templates is built in the wrong order, the empty block
|
||||
// looks non-empty and this doesn't happen.
|
||||
var inlined = map[string]string{
|
||||
"stylesheet": `{{define "stylesheet"}}stylesheet{{end}}`,
|
||||
"xhtml": `{{block "stylesheet" .}}{{end}}`,
|
||||
}
|
||||
all := []string{"stylesheet", "xhtml"}
|
||||
for i := 0; i < 100; i++ {
|
||||
res, err := New("title.xhtml").Parse(`{{template "xhtml" .}}`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, name := range all {
|
||||
_, err := res.New(name).Parse(inlined[name])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
var buf strings.Builder
|
||||
res.Execute(&buf, 0)
|
||||
if buf.String() != "stylesheet" {
|
||||
t.Fatalf("iteration %d: got %q; expected %q", i, buf.String(), "stylesheet")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 48436
|
||||
func TestAddToZeroTemplate(t *testing.T) {
|
||||
tree, err := parse.Parse("c", cloneText3, "", "", nil, builtins())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var tmpl Template
|
||||
tmpl.AddParseTree("x", tree["c"])
|
||||
}
|
||||
72
src/text/template/option.go
Normal file
@@ -0,0 +1,72 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file contains the code to handle template options.

package template

import "strings"

// missingKeyAction defines how to respond to indexing a map with a key that is not present.
type missingKeyAction int

const (
	mapInvalid   missingKeyAction = iota // Return an invalid reflect.Value.
	mapZeroValue                         // Return the zero value for the map element.
	mapError                             // Error out
)

type option struct {
	missingKey missingKeyAction
}

// Option sets options for the template. Options are described by
// strings, either a simple string or "key=value". There can be at
// most one equals sign in an option string. If the option string
// is unrecognized or otherwise invalid, Option panics.
//
// Known options:
//
// missingkey: Control the behavior during execution if a map is
// indexed with a key that is not present in the map.
//
//	"missingkey=default" or "missingkey=invalid"
//		The default behavior: Do nothing and continue execution.
//		If printed, the result of the index operation is the string
//		"<no value>".
//	"missingkey=zero"
//		The operation returns the zero value for the map type's element.
//	"missingkey=error"
//		Execution stops immediately with an error.
func (t *Template) Option(opt ...string) *Template {
	t.init()
	for _, s := range opt {
		t.setOption(s)
	}
	return t
}

func (t *Template) setOption(opt string) {
	if opt == "" {
		panic("empty option string")
	}
	// key=value
	if key, value, ok := strings.Cut(opt, "="); ok {
		switch key {
		case "missingkey":
			switch value {
			case "invalid", "default":
				t.option.missingKey = mapInvalid
				return
			case "zero":
				t.option.missingKey = mapZeroValue
				return
			case "error":
				t.option.missingKey = mapError
				return
			}
		}
	}
	panic("unrecognized option: " + opt)
}
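
A brief usage sketch of the missingkey option documented above (illustrative, not part of option.go); the template text and data are made up for the example.

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("m").Parse("{{.present}} {{.absent}}\n"))
	data := map[string]string{"present": "here"}

	// Default ("missingkey=default"): missing keys print as "<no value>".
	t.Execute(os.Stdout, data)

	// "missingkey=error": execution stops at the first missing map key.
	if err := t.Option("missingkey=error").Execute(os.Stdout, data); err != nil {
		fmt.Println("error:", err)
	}
}
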
686
src/text/template/parse/lex.go
Normal file
@@ -0,0 +1,686 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package parse

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	val  string   // The value of this item.
	line int      // The line number at the start of this item.
}

func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ > itemKeyword:
		return fmt.Sprintf("<%s>", i.val)
	case len(i.val) > 10:
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
	itemError        itemType = iota // error occurred; value is text of error
	itemBool                         // boolean constant
	itemChar                         // printable ASCII character; grab bag for comma etc.
	itemCharConstant                 // character constant
	itemComment                      // comment text
	itemComplex                      // complex constant (1+2i); imaginary is just a number
	itemAssign                       // equals ('=') introducing an assignment
	itemDeclare                      // colon-equals (':=') introducing a declaration
	itemEOF
	itemField      // alphanumeric identifier starting with '.'
	itemIdentifier // alphanumeric identifier not starting with '.'
	itemLeftDelim  // left action delimiter
	itemLeftParen  // '(' inside action
	itemNumber     // simple number, including imaginary
	itemPipe       // pipe symbol
	itemRawString  // raw quoted string (includes quotes)
	itemRightDelim // right action delimiter
	itemRightParen // ')' inside action
	itemSpace      // run of spaces separating arguments
	itemString     // quoted string (includes quotes)
	itemText       // plain text
	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
	// Keywords appear after all the rest.
	itemKeyword  // used only to delimit the keywords
	itemBlock    // block keyword
	itemBreak    // break keyword
	itemContinue // continue keyword
	itemDot      // the cursor, spelled '.'
	itemDefine   // define keyword
	itemElse     // else keyword
	itemEnd      // end keyword
	itemIf       // if keyword
	itemNil      // the untyped nil constant, easiest to treat as a keyword
	itemRange    // range keyword
	itemTemplate // template keyword
	itemWith     // with keyword
)

var key = map[string]itemType{
	".":        itemDot,
	"block":    itemBlock,
	"break":    itemBreak,
	"continue": itemContinue,
	"define":   itemDefine,
	"else":     itemElse,
	"end":      itemEnd,
	"if":       itemIf,
	"range":    itemRange,
	"nil":      itemNil,
	"template": itemTemplate,
	"with":     itemWith,
}

const eof = -1

// Trimming spaces.
// If the action begins "{{- " rather than "{{", then all space/tab/newlines
// preceding the action are trimmed; conversely if it ends " -}}" the
// leading spaces are trimmed. This is done entirely in the lexer; the
// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
// to be present to avoid ambiguity with things like "{{-3}}". It reads
// better with the space present anyway. For simplicity, only ASCII
// does the job.
const (
	spaceChars    = " \t\r\n"  // These are the space characters defined by Go itself.
	trimMarker    = '-'        // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
	trimMarkerLen = Pos(1 + 1) // marker plus space before or after
)
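
An editor's aside, not part of lex.go: the trim-marker behavior described above can be seen end to end through the public text/template API. The template text below is an assumption made up for the example.

package main

import (
	"os"
	"text/template"
)

func main() {
	// "{{- " trims the space and newline before the action; " -}}" trims
	// the newline and space after it, so this prints "abc".
	const src = "a \n{{- `b` -}}\n c"
	t := template.Must(template.New("trim").Parse(src))
	t.Execute(os.Stdout, nil)
}
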
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action marker
|
||||
rightDelim string // end of action marker
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
atEOF bool // we have hit the end of input and returned eof
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
line int // 1+number of newlines seen
|
||||
startLine int // start line of this item
|
||||
item item // item to return to parser
|
||||
insideAction bool // are we inside an action?
|
||||
options lexOptions
|
||||
}
|
||||
|
||||
// lexOptions control behavior of the lexer. All default to false.
|
||||
type lexOptions struct {
|
||||
emitComment bool // emit itemComment tokens.
|
||||
breakOK bool // break keyword allowed
|
||||
continueOK bool // continue keyword allowed
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.atEOF = true
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.pos += Pos(w)
|
||||
if r == '\n' {
|
||||
l.line++
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune.
|
||||
func (l *lexer) backup() {
|
||||
if !l.atEOF && l.pos > 0 {
|
||||
r, w := utf8.DecodeLastRuneInString(l.input[:l.pos])
|
||||
l.pos -= Pos(w)
|
||||
// Correct newline count.
|
||||
if r == '\n' {
|
||||
l.line--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// thisItem returns the item at the current input point with the specified type
|
||||
// and advances the input.
|
||||
func (l *lexer) thisItem(t itemType) item {
|
||||
i := item{t, l.start, l.input[l.start:l.pos], l.startLine}
|
||||
l.start = l.pos
|
||||
l.startLine = l.line
|
||||
return i
|
||||
}
|
||||
|
||||
// emit passes the trailing text as an item back to the parser.
|
||||
func (l *lexer) emit(t itemType) stateFn {
|
||||
return l.emitItem(l.thisItem(t))
|
||||
}
|
||||
|
||||
// emitItem passes the specified item to the parser.
|
||||
func (l *lexer) emitItem(i item) stateFn {
|
||||
l.item = i
|
||||
return nil
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
// It tracks newlines in the ignored text, so use it only
|
||||
// for text that is skipped without calling l.next.
|
||||
func (l *lexer) ignore() {
|
||||
l.line += strings.Count(l.input[l.start:l.pos], "\n")
|
||||
l.start = l.pos
|
||||
l.startLine = l.line
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.ContainsRune(valid, l.next()) {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.ContainsRune(valid, l.next()) {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...any) stateFn {
|
||||
l.item = item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
|
||||
l.start = 0
|
||||
l.pos = 0
|
||||
l.input = l.input[:0]
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
// Called by the parser, not in the lexing goroutine.
|
||||
func (l *lexer) nextItem() item {
|
||||
l.item = item{itemEOF, l.pos, "EOF", l.startLine}
|
||||
state := lexText
|
||||
if l.insideAction {
|
||||
state = lexInsideAction
|
||||
}
|
||||
for {
|
||||
state = state(l)
|
||||
if state == nil {
|
||||
return l.item
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
line: 1,
|
||||
startLine: 1,
|
||||
insideAction: false,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
|
||||
if x > 0 {
|
||||
l.pos += Pos(x)
|
||||
// Do we trim any trailing space?
|
||||
trimLength := Pos(0)
|
||||
delimEnd := l.pos + Pos(len(l.leftDelim))
|
||||
if hasLeftTrimMarker(l.input[delimEnd:]) {
|
||||
trimLength = rightTrimLength(l.input[l.start:l.pos])
|
||||
}
|
||||
l.pos -= trimLength
|
||||
l.line += strings.Count(l.input[l.start:l.pos], "\n")
|
||||
i := l.thisItem(itemText)
|
||||
l.pos += trimLength
|
||||
l.ignore()
|
||||
if len(i.val) > 0 {
|
||||
return l.emitItem(i)
|
||||
}
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
l.pos = Pos(len(l.input))
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.line += strings.Count(l.input[l.start:l.pos], "\n")
|
||||
return l.emit(itemText)
|
||||
}
|
||||
return l.emit(itemEOF)
|
||||
}
|
||||
|
||||
// rightTrimLength returns the length of the spaces at the end of the string.
|
||||
func rightTrimLength(s string) Pos {
|
||||
return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
|
||||
}
|
||||
|
||||
// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
|
||||
func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
|
||||
if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
|
||||
return true, true
|
||||
}
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
|
||||
return true, false
|
||||
}
|
||||
return false, false
|
||||
}
|
||||
|
||||
// leftTrimLength returns the length of the spaces at the beginning of the string.
|
||||
func leftTrimLength(s string) Pos {
|
||||
return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
|
||||
// (The text to be trimmed has already been emitted.)
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
trimSpace := hasLeftTrimMarker(l.input[l.pos:])
|
||||
afterMarker := Pos(0)
|
||||
if trimSpace {
|
||||
afterMarker = trimMarkerLen
|
||||
}
|
||||
if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
|
||||
l.pos += afterMarker
|
||||
l.ignore()
|
||||
return lexComment
|
||||
}
|
||||
i := l.thisItem(itemLeftDelim)
|
||||
l.insideAction = true
|
||||
l.pos += afterMarker
|
||||
l.ignore()
|
||||
l.parenDepth = 0
|
||||
return l.emitItem(i)
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
x := strings.Index(l.input[l.pos:], rightComment)
|
||||
if x < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(x + len(rightComment))
|
||||
delim, trimSpace := l.atRightDelim()
|
||||
if !delim {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
}
|
||||
i := l.thisItem(itemComment)
|
||||
if trimSpace {
|
||||
l.pos += trimMarkerLen
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
if trimSpace {
|
||||
l.pos += leftTrimLength(l.input[l.pos:])
|
||||
}
|
||||
l.ignore()
|
||||
if l.options.emitComment {
|
||||
return l.emitItem(i)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
_, trimSpace := l.atRightDelim()
|
||||
if trimSpace {
|
||||
l.pos += trimMarkerLen
|
||||
l.ignore()
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
i := l.thisItem(itemRightDelim)
|
||||
if trimSpace {
|
||||
l.pos += leftTrimLength(l.input[l.pos:])
|
||||
l.ignore()
|
||||
}
|
||||
l.insideAction = false
|
||||
return l.emitItem(i)
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
delim, _ := l.atRightDelim()
|
||||
if delim {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof:
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
l.backup() // Put space back in case we have " -}}".
|
||||
return lexSpace
|
||||
case r == '=':
|
||||
return l.emit(itemAssign)
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
return l.emit(itemDeclare)
|
||||
case r == '|':
|
||||
return l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.parenDepth++
|
||||
return l.emit(itemLeftParen)
|
||||
case r == ')':
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren")
|
||||
}
|
||||
return l.emit(itemRightParen)
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
return l.emit(itemChar)
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// We have not consumed the first space, which is known to be present.
|
||||
// Take care if there is a trim-marked right delimiter, which starts with a space.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
var r rune
|
||||
var numSpaces int
|
||||
for {
|
||||
r = l.peek()
|
||||
if !isSpace(r) {
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
numSpaces++
|
||||
}
|
||||
// Be careful about a trim-marked closing delimiter, which has a minus
|
||||
// after a space. We know there is a space, so check for the '-' that might follow.
|
||||
if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
|
||||
l.backup() // Before the space.
|
||||
if numSpaces == 1 {
|
||||
return lexRightDelim // On the delim, so go right to that.
|
||||
}
|
||||
}
|
||||
return l.emit(itemSpace)
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
item := key[word]
|
||||
if item == itemBreak && !l.options.breakOK || item == itemContinue && !l.options.continueOK {
|
||||
return l.emit(itemIdentifier)
|
||||
}
|
||||
return l.emit(item)
|
||||
case word[0] == '.':
|
||||
return l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
return l.emit(itemBool)
|
||||
default:
|
||||
return l.emit(itemIdentifier)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
return l.emit(itemVariable)
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
return l.emit(itemVariable)
|
||||
}
|
||||
return l.emit(itemDot)
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
return l.emit(typ)
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
return strings.HasPrefix(l.input[l.pos:], l.rightDelim)
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return l.emit(itemCharConstant)
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
return l.emit(itemComplex)
|
||||
}
|
||||
return l.emit(itemNumber)
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789_"
|
||||
if l.accept("0") {
|
||||
// Note: Leading 0 does not mean octal in floats.
|
||||
if l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF_"
|
||||
} else if l.accept("oO") {
|
||||
digits = "01234567_"
|
||||
} else if l.accept("bB") {
|
||||
digits = "01_"
|
||||
}
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if len(digits) == 10+1 && l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789_")
|
||||
}
|
||||
if len(digits) == 16+6+1 && l.accept("pP") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789_")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return l.emit(itemString)
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof:
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return l.emit(itemRawString)
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t' || r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
|
||||
func hasLeftTrimMarker(s string) bool {
|
||||
return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
|
||||
}
|
||||
|
||||
func hasRightTrimMarker(s string) bool {
|
||||
return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
|
||||
}
|
||||
582
src/text/template/parse/lex_test.go
Normal file
@@ -0,0 +1,582 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Make the types prettyprint.
|
||||
var itemName = map[itemType]string{
|
||||
itemError: "error",
|
||||
itemBool: "bool",
|
||||
itemChar: "char",
|
||||
itemCharConstant: "charconst",
|
||||
itemComment: "comment",
|
||||
itemComplex: "complex",
|
||||
itemDeclare: ":=",
|
||||
itemEOF: "EOF",
|
||||
itemField: "field",
|
||||
itemIdentifier: "identifier",
|
||||
itemLeftDelim: "left delim",
|
||||
itemLeftParen: "(",
|
||||
itemNumber: "number",
|
||||
itemPipe: "pipe",
|
||||
itemRawString: "raw string",
|
||||
itemRightDelim: "right delim",
|
||||
itemRightParen: ")",
|
||||
itemSpace: "space",
|
||||
itemString: "string",
|
||||
itemVariable: "variable",
|
||||
|
||||
// keywords
|
||||
itemDot: ".",
|
||||
itemBlock: "block",
|
||||
itemBreak: "break",
|
||||
itemContinue: "continue",
|
||||
itemDefine: "define",
|
||||
itemElse: "else",
|
||||
itemIf: "if",
|
||||
itemEnd: "end",
|
||||
itemNil: "nil",
|
||||
itemRange: "range",
|
||||
itemTemplate: "template",
|
||||
itemWith: "with",
|
||||
}
|
||||
|
||||
func (i itemType) String() string {
|
||||
s := itemName[i]
|
||||
if s == "" {
|
||||
return fmt.Sprintf("item%d", int(i))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type lexTest struct {
|
||||
name string
|
||||
input string
|
||||
items []item
|
||||
}
|
||||
|
||||
func mkItem(typ itemType, text string) item {
|
||||
return item{
|
||||
typ: typ,
|
||||
val: text,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
tDot = mkItem(itemDot, ".")
|
||||
tBlock = mkItem(itemBlock, "block")
|
||||
tEOF = mkItem(itemEOF, "")
|
||||
tFor = mkItem(itemIdentifier, "for")
|
||||
tLeft = mkItem(itemLeftDelim, "{{")
|
||||
tLpar = mkItem(itemLeftParen, "(")
|
||||
tPipe = mkItem(itemPipe, "|")
|
||||
tQuote = mkItem(itemString, `"abc \n\t\" "`)
|
||||
tRange = mkItem(itemRange, "range")
|
||||
tRight = mkItem(itemRightDelim, "}}")
|
||||
tRpar = mkItem(itemRightParen, ")")
|
||||
tSpace = mkItem(itemSpace, " ")
|
||||
raw = "`" + `abc\n\t\" ` + "`"
|
||||
rawNL = "`now is{{\n}}the time`" // Contains newline inside raw quote.
|
||||
tRawQuote = mkItem(itemRawString, raw)
|
||||
tRawQuoteNL = mkItem(itemRawString, rawNL)
|
||||
)
|
||||
|
||||
var lexTests = []lexTest{
|
||||
{"empty", "", []item{tEOF}},
|
||||
{"spaces", " \t\n", []item{mkItem(itemText, " \t\n"), tEOF}},
|
||||
{"text", `now is the time`, []item{mkItem(itemText, "now is the time"), tEOF}},
|
||||
{"text with comment", "hello-{{/* this is a comment */}}-world", []item{
|
||||
mkItem(itemText, "hello-"),
|
||||
mkItem(itemComment, "/* this is a comment */"),
|
||||
mkItem(itemText, "-world"),
|
||||
tEOF,
|
||||
}},
|
||||
{"punctuation", "{{,@% }}", []item{
|
||||
tLeft,
|
||||
mkItem(itemChar, ","),
|
||||
mkItem(itemChar, "@"),
|
||||
mkItem(itemChar, "%"),
|
||||
tSpace,
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"parens", "{{((3))}}", []item{
|
||||
tLeft,
|
||||
tLpar,
|
||||
tLpar,
|
||||
mkItem(itemNumber, "3"),
|
||||
tRpar,
|
||||
tRpar,
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
|
||||
{"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}},
|
||||
{"block", `{{block "foo" .}}`, []item{
|
||||
tLeft, tBlock, tSpace, mkItem(itemString, `"foo"`), tSpace, tDot, tRight, tEOF,
|
||||
}},
|
||||
{"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
|
||||
{"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
|
||||
{"raw quote with newline", "{{" + rawNL + "}}", []item{tLeft, tRawQuoteNL, tRight, tEOF}},
|
||||
{"numbers", "{{1 02 0x14 0X14 -7.2i 1e3 1E3 +1.2e-4 4.2i 1+2i 1_2 0x1.e_fp4 0X1.E_FP4}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemNumber, "1"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "02"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "0x14"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "0X14"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "-7.2i"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "1e3"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "1E3"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "+1.2e-4"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "4.2i"),
|
||||
tSpace,
|
||||
mkItem(itemComplex, "1+2i"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "1_2"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "0x1.e_fp4"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "0X1.E_FP4"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
|
||||
tLeft,
|
||||
mkItem(itemCharConstant, `'a'`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'\n'`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'\''`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'\\'`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'\u00FF'`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'\xFF'`),
|
||||
tSpace,
|
||||
mkItem(itemCharConstant, `'本'`),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"bools", "{{true false}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemBool, "true"),
|
||||
tSpace,
|
||||
mkItem(itemBool, "false"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"dot", "{{.}}", []item{
|
||||
tLeft,
|
||||
tDot,
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"nil", "{{nil}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemNil, "nil"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"dots", "{{.x . .2 .x.y.z}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemField, ".x"),
|
||||
tSpace,
|
||||
tDot,
|
||||
tSpace,
|
||||
mkItem(itemNumber, ".2"),
|
||||
tSpace,
|
||||
mkItem(itemField, ".x"),
|
||||
mkItem(itemField, ".y"),
|
||||
mkItem(itemField, ".z"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"keywords", "{{range if else end with}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemRange, "range"),
|
||||
tSpace,
|
||||
mkItem(itemIf, "if"),
|
||||
tSpace,
|
||||
mkItem(itemElse, "else"),
|
||||
tSpace,
|
||||
mkItem(itemEnd, "end"),
|
||||
tSpace,
|
||||
mkItem(itemWith, "with"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemVariable, "$c"),
|
||||
tSpace,
|
||||
mkItem(itemDeclare, ":="),
|
||||
tSpace,
|
||||
mkItem(itemIdentifier, "printf"),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$"),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$hello"),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$23"),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$"),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$var"),
|
||||
mkItem(itemField, ".Field"),
|
||||
tSpace,
|
||||
mkItem(itemField, ".Method"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"variable invocation", "{{$x 23}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemVariable, "$x"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "23"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
|
||||
mkItem(itemText, "intro "),
|
||||
tLeft,
|
||||
mkItem(itemIdentifier, "echo"),
|
||||
tSpace,
|
||||
mkItem(itemIdentifier, "hi"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "1.2"),
|
||||
tSpace,
|
||||
tPipe,
|
||||
mkItem(itemIdentifier, "noargs"),
|
||||
tPipe,
|
||||
mkItem(itemIdentifier, "args"),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "1"),
|
||||
tSpace,
|
||||
mkItem(itemString, `"hi"`),
|
||||
tRight,
|
||||
mkItem(itemText, " outro"),
|
||||
tEOF,
|
||||
}},
|
||||
{"declaration", "{{$v := 3}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemVariable, "$v"),
|
||||
tSpace,
|
||||
mkItem(itemDeclare, ":="),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "3"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"2 declarations", "{{$v , $w := 3}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemVariable, "$v"),
|
||||
tSpace,
|
||||
mkItem(itemChar, ","),
|
||||
tSpace,
|
||||
mkItem(itemVariable, "$w"),
|
||||
tSpace,
|
||||
mkItem(itemDeclare, ":="),
|
||||
tSpace,
|
||||
mkItem(itemNumber, "3"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"field of parenthesized expression", "{{(.X).Y}}", []item{
|
||||
tLeft,
|
||||
tLpar,
|
||||
mkItem(itemField, ".X"),
|
||||
tRpar,
|
||||
mkItem(itemField, ".Y"),
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"trimming spaces before and after", "hello- {{- 3 -}} -world", []item{
|
||||
mkItem(itemText, "hello-"),
|
||||
tLeft,
|
||||
mkItem(itemNumber, "3"),
|
||||
tRight,
|
||||
mkItem(itemText, "-world"),
|
||||
tEOF,
|
||||
}},
|
||||
{"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{
|
||||
mkItem(itemText, "hello-"),
|
||||
mkItem(itemComment, "/* hello */"),
|
||||
mkItem(itemText, "-world"),
|
||||
tEOF,
|
||||
}},
|
||||
// errors
|
||||
{"badchar", "#{{\x01}}", []item{
|
||||
mkItem(itemText, "#"),
|
||||
tLeft,
|
||||
mkItem(itemError, "unrecognized character in action: U+0001"),
|
||||
}},
|
||||
{"unclosed action", "{{", []item{
|
||||
tLeft,
|
||||
mkItem(itemError, "unclosed action"),
|
||||
}},
|
||||
{"EOF in action", "{{range", []item{
|
||||
tLeft,
|
||||
tRange,
|
||||
mkItem(itemError, "unclosed action"),
|
||||
}},
|
||||
{"unclosed quote", "{{\"\n\"}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemError, "unterminated quoted string"),
|
||||
}},
|
||||
{"unclosed raw quote", "{{`xx}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemError, "unterminated raw quoted string"),
|
||||
}},
|
||||
{"unclosed char constant", "{{'\n}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemError, "unterminated character constant"),
|
||||
}},
|
||||
{"bad number", "{{3k}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemError, `bad number syntax: "3k"`),
|
||||
}},
|
||||
{"unclosed paren", "{{(3}}", []item{
|
||||
tLeft,
|
||||
tLpar,
|
||||
mkItem(itemNumber, "3"),
|
||||
mkItem(itemError, `unclosed left paren`),
|
||||
}},
|
||||
{"extra right paren", "{{3)}}", []item{
|
||||
tLeft,
|
||||
mkItem(itemNumber, "3"),
|
||||
mkItem(itemError, "unexpected right paren"),
|
||||
}},
|
||||
|
||||
// Fixed bugs
|
||||
// Many elements in an action blew the lookahead until
|
||||
// we made lexInsideAction not loop.
|
||||
{"long pipeline deadlock", "{{|||||}}", []item{
|
||||
tLeft,
|
||||
tPipe,
|
||||
tPipe,
|
||||
tPipe,
|
||||
tPipe,
|
||||
tPipe,
|
||||
tRight,
|
||||
tEOF,
|
||||
}},
|
||||
{"text with bad comment", "hello-{{/*/}}-world", []item{
|
||||
mkItem(itemText, "hello-"),
|
||||
mkItem(itemError, `unclosed comment`),
|
||||
}},
|
||||
{"text with comment close separated from delim", "hello-{{/* */ }}-world", []item{
|
||||
mkItem(itemText, "hello-"),
|
||||
mkItem(itemError, `comment ends before closing delimiter`),
|
||||
}},
|
||||
// This one is an error that we can't catch because it breaks templates with
|
||||
// minimized JavaScript. Should have fixed it before Go 1.1.
|
||||
{"unmatched right delimiter", "hello-{.}}-world", []item{
|
||||
mkItem(itemText, "hello-{.}}-world"),
|
||||
tEOF,
|
||||
}},
|
||||
}
|
||||
|
||||
// collect gathers the emitted items into a slice.
|
||||
func collect(t *lexTest, left, right string) (items []item) {
|
||||
l := lex(t.name, t.input, left, right)
|
||||
l.options = lexOptions{
|
||||
emitComment: true,
|
||||
breakOK: true,
|
||||
continueOK: true,
|
||||
}
|
||||
for {
|
||||
item := l.nextItem()
|
||||
items = append(items, item)
|
||||
if item.typ == itemEOF || item.typ == itemError {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func equal(i1, i2 []item, checkPos bool) bool {
|
||||
if len(i1) != len(i2) {
|
||||
return false
|
||||
}
|
||||
for k := range i1 {
|
||||
if i1[k].typ != i2[k].typ {
|
||||
return false
|
||||
}
|
||||
if i1[k].val != i2[k].val {
|
||||
return false
|
||||
}
|
||||
if checkPos && i1[k].pos != i2[k].pos {
|
||||
return false
|
||||
}
|
||||
if checkPos && i1[k].line != i2[k].line {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestLex(t *testing.T) {
|
||||
for _, test := range lexTests {
|
||||
items := collect(&test, "", "")
|
||||
if !equal(items, test.items, false) {
|
||||
t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items)
|
||||
return // TODO
|
||||
}
|
||||
t.Log(test.name, "OK")
|
||||
}
|
||||
}
|
||||
|
||||
// Some easy cases from above, but with delimiters $$ and @@
|
||||
var lexDelimTests = []lexTest{
|
||||
{"punctuation", "$$,@%{{}}@@", []item{
|
||||
tLeftDelim,
|
||||
mkItem(itemChar, ","),
|
||||
mkItem(itemChar, "@"),
|
||||
mkItem(itemChar, "%"),
|
||||
mkItem(itemChar, "{"),
|
||||
mkItem(itemChar, "{"),
|
||||
mkItem(itemChar, "}"),
|
||||
mkItem(itemChar, "}"),
|
||||
tRightDelim,
|
||||
tEOF,
|
||||
}},
|
||||
{"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
|
||||
{"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
|
||||
{"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
|
||||
{"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
|
||||
}
|
||||
|
||||
var (
|
||||
tLeftDelim = mkItem(itemLeftDelim, "$$")
|
||||
tRightDelim = mkItem(itemRightDelim, "@@")
|
||||
)
|
||||
|
||||
func TestDelims(t *testing.T) {
|
||||
for _, test := range lexDelimTests {
|
||||
items := collect(&test, "$$", "@@")
|
||||
if !equal(items, test.items, false) {
|
||||
t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelimsAlphaNumeric(t *testing.T) {
|
||||
test := lexTest{"right delimiter with alphanumeric start", "{{hub .host hub}}", []item{
|
||||
mkItem(itemLeftDelim, "{{hub"),
|
||||
mkItem(itemSpace, " "),
|
||||
mkItem(itemField, ".host"),
|
||||
mkItem(itemSpace, " "),
|
||||
mkItem(itemRightDelim, "hub}}"),
|
||||
tEOF,
|
||||
}}
|
||||
items := collect(&test, "{{hub", "hub}}")
|
||||
|
||||
if !equal(items, test.items, false) {
|
||||
t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelimsAndMarkers(t *testing.T) {
|
||||
test := lexTest{"delims that look like markers", "{{- .x -}} {{- - .x - -}}", []item{
|
||||
mkItem(itemLeftDelim, "{{- "),
|
||||
mkItem(itemField, ".x"),
|
||||
mkItem(itemRightDelim, " -}}"),
|
||||
mkItem(itemLeftDelim, "{{- "),
|
||||
mkItem(itemField, ".x"),
|
||||
mkItem(itemRightDelim, " -}}"),
|
||||
tEOF,
|
||||
}}
|
||||
items := collect(&test, "{{- ", " -}}")
|
||||
|
||||
if !equal(items, test.items, false) {
|
||||
t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
|
||||
}
|
||||
}
|
||||
|
||||
var lexPosTests = []lexTest{
|
||||
{"empty", "", []item{{itemEOF, 0, "", 1}}},
|
||||
{"punctuation", "{{,@%#}}", []item{
|
||||
{itemLeftDelim, 0, "{{", 1},
|
||||
{itemChar, 2, ",", 1},
|
||||
{itemChar, 3, "@", 1},
|
||||
{itemChar, 4, "%", 1},
|
||||
{itemChar, 5, "#", 1},
|
||||
{itemRightDelim, 6, "}}", 1},
|
||||
{itemEOF, 8, "", 1},
|
||||
}},
|
||||
{"sample", "0123{{hello}}xyz", []item{
|
||||
{itemText, 0, "0123", 1},
|
||||
{itemLeftDelim, 4, "{{", 1},
|
||||
{itemIdentifier, 6, "hello", 1},
|
||||
{itemRightDelim, 11, "}}", 1},
|
||||
{itemText, 13, "xyz", 1},
|
||||
{itemEOF, 16, "", 1},
|
||||
}},
|
||||
{"trimafter", "{{x -}}\n{{y}}", []item{
|
||||
{itemLeftDelim, 0, "{{", 1},
|
||||
{itemIdentifier, 2, "x", 1},
|
||||
{itemRightDelim, 5, "}}", 1},
|
||||
{itemLeftDelim, 8, "{{", 2},
|
||||
{itemIdentifier, 10, "y", 2},
|
||||
{itemRightDelim, 11, "}}", 2},
|
||||
{itemEOF, 13, "", 2},
|
||||
}},
|
||||
{"trimbefore", "{{x}}\n{{- y}}", []item{
|
||||
{itemLeftDelim, 0, "{{", 1},
|
||||
{itemIdentifier, 2, "x", 1},
|
||||
{itemRightDelim, 3, "}}", 1},
|
||||
{itemLeftDelim, 6, "{{", 2},
|
||||
{itemIdentifier, 10, "y", 2},
|
||||
{itemRightDelim, 11, "}}", 2},
|
||||
{itemEOF, 13, "", 2},
|
||||
}},
|
||||
}
|
||||
|
||||
// The other tests don't check position, to make the test cases easier to construct.
|
||||
// This one does.
|
||||
func TestPos(t *testing.T) {
|
||||
for _, test := range lexPosTests {
|
||||
items := collect(&test, "", "")
|
||||
if !equal(items, test.items, true) {
|
||||
t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
|
||||
if len(items) == len(test.items) {
|
||||
// Detailed print; avoid item.String() to expose the position value.
|
||||
for i := range items {
|
||||
if !equal(items[i:i+1], test.items[i:i+1], true) {
|
||||
i1 := items[i]
|
||||
i2 := test.items[i]
|
||||
t.Errorf("\t#%d: got {%v %d %q %d} expected {%v %d %q %d}",
|
||||
i, i1.typ, i1.pos, i1.val, i1.line, i2.typ, i2.pos, i2.val, i2.line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseLexer is a local version of parse that lets us pass in the lexer instead of building it.
|
||||
// We expect an error, so the tree set and funcs list are explicitly nil.
|
||||
func (t *Tree) parseLexer(lex *lexer) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(nil, lex, map[string]*Tree{})
|
||||
t.parse()
|
||||
t.add()
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
1011
src/text/template/parse/node.go
Normal file
File diff suppressed because it is too large
831
src/text/template/parse/parse.go
Normal file
@@ -0,0 +1,831 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
Mode Mode // parsing mode.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]any
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
treeSet map[string]*Tree
|
||||
actionLine int // line of left delim starting action
|
||||
rangeDepth int
|
||||
}
|
||||
|
||||
// A mode value is a set of flags (or 0). Modes control parser behavior.
|
||||
type Mode uint
|
||||
|
||||
const (
|
||||
ParseComments Mode = 1 << iota // parse comments and add them to AST
|
||||
SkipFuncCheck // do not check that functions are defined
|
||||
)
|
||||
|
||||
// Copy returns a copy of the [Tree]. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to [Tree], created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error) {
|
||||
treeSet := make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return treeSet, err
|
||||
}
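// A minimal usage sketch of the exported Parse (empty delimiter strings select the
// default "{{" and "}}"); the outputs shown are illustrative:
//
//	trees, err := parse.Parse("main", `{{define "T"}}hi{{end}}{{template "T"}}`, "", "")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(trees["main"].Root.String()) // {{template "T"}}
//	fmt.Println(trees["T"].Root.String())    // hi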
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() item {
|
||||
token := t.nextNonSpace()
|
||||
t.backup()
|
||||
return token
|
||||
}
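// The token buffer and peekCount above give up to three tokens of pushback.
// A short trace of the common pattern used in parse() below (assuming no
// intervening space tokens):
//
//	delim := t.next()        // token[0] == delim, peekCount == 0
//	word := t.nextNonSpace() // token[0] == word
//	t.backup2(delim)         // token[1] = delim, peekCount = 2
//	t.next()                 // returns delim again
//	t.next()                 // returns word again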
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]any) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
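// Illustrative use (assumes a successfully parsed *Tree named tr): the location has
// the form "ParseName:line:byte", where byte counts from the start of the node's line.
//
//	loc, ctx := tr.ErrorContext(tr.Root.Nodes[0])
//	fmt.Printf("%s: %s\n", loc, ctx)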
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...any) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
if token.typ == itemError {
|
||||
extra := ""
|
||||
if t.actionLine != 0 && t.actionLine != token.line {
|
||||
extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
|
||||
if strings.HasSuffix(token.val, " action") {
|
||||
extra = extra[len(" in action"):] // avoid "action in action"
|
||||
}
|
||||
}
|
||||
t.errorf("%s%s", token, extra)
|
||||
}
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]any, lex *lexer, treeSet map[string]*Tree) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
t.treeSet = treeSet
|
||||
lex.options = lexOptions{
|
||||
emitComment: t.Mode&ParseComments != 0,
|
||||
breakOK: !t.hasFunction("break"),
|
||||
continueOK: !t.hasFunction("continue"),
|
||||
}
|
||||
}
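// The emitComment option is what makes comment nodes reachable at all: with the
// ParseComments mode bit set before parsing, a {{/* note */}} survives into the
// tree instead of being discarded (see TestParseWithComments in parse_test.go).
// Sketch:
//
//	tr := New("t")
//	tr.Mode = ParseComments
//	tmpl, _ := tr.Parse("{{/* note */}}", "", "", make(map[string]*Tree))
//	_ = tmpl.Root.Nodes[0].(*CommentNode)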
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
t.treeSet = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]any) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
lexer := lex(t.Name, text, leftDelim, rightDelim)
|
||||
t.startParse(funcs, lexer, treeSet)
|
||||
t.text = text
|
||||
t.parse()
|
||||
t.add()
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to t.treeSet.
|
||||
func (t *Tree) add() {
|
||||
tree := t.treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
t.treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *CommentNode:
|
||||
return true
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
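// A sketch of the distinction IsEmptyTree draws (hypothetical inputs):
//
//	trees, _ := Parse("a", `  {{define "x"}}body{{end}}  `, "", "")
//	fmt.Println(IsEmptyTree(trees["a"].Root)) // true: only space and a definition
//
//	trees, _ = Parse("b", `hello`, "", "")
//	fmt.Println(IsEmptyTree(trees["b"].Root)) // false: visible text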
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse() {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.Mode = t.Mode
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex, t.treeSet)
|
||||
newT.parseDefinition()
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
switch n := t.textOrAction(); n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
t.errorf("unexpected %s", n)
|
||||
default:
|
||||
t.Root.append(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in t.treeSet. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition() {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add()
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
//
|
||||
// textOrAction*
|
||||
//
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
//
|
||||
// text | comment | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
t.actionLine = token.line
|
||||
defer t.clearActionLine()
|
||||
return t.action()
|
||||
case itemComment:
|
||||
return t.newComment(token.pos, token.val)
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Tree) clearActionLine() {
|
||||
t.actionLine = 0
|
||||
}
|
||||
|
||||
// Action:
|
||||
//
|
||||
// control
|
||||
// command ("|" command)*
|
||||
//
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemBlock:
|
||||
return t.blockControl()
|
||||
case itemBreak:
|
||||
return t.breakControl(token.pos, token.line)
|
||||
case itemContinue:
|
||||
return t.continueControl(token.pos, token.line)
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
token := t.peek()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
|
||||
}
|
||||
|
||||
// Break:
|
||||
//
|
||||
// {{break}}
|
||||
//
|
||||
// Break keyword is past.
|
||||
func (t *Tree) breakControl(pos Pos, line int) Node {
|
||||
if token := t.nextNonSpace(); token.typ != itemRightDelim {
|
||||
t.unexpected(token, "{{break}}")
|
||||
}
|
||||
if t.rangeDepth == 0 {
|
||||
t.errorf("{{break}} outside {{range}}")
|
||||
}
|
||||
return t.newBreak(pos, line)
|
||||
}
|
||||
|
||||
// Continue:
|
||||
//
|
||||
// {{continue}}
|
||||
//
|
||||
// Continue keyword is past.
|
||||
func (t *Tree) continueControl(pos Pos, line int) Node {
|
||||
if token := t.nextNonSpace(); token.typ != itemRightDelim {
|
||||
t.unexpected(token, "{{continue}}")
|
||||
}
|
||||
if t.rangeDepth == 0 {
|
||||
t.errorf("{{continue}} outside {{range}}")
|
||||
}
|
||||
return t.newContinue(pos, line)
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
//
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
|
||||
token := t.peekNonSpace()
|
||||
pipe = t.newPipeline(token.pos, token.line, nil)
|
||||
// Are there declarations or assignments?
|
||||
decls:
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
next := t.peekNonSpace()
|
||||
switch {
|
||||
case next.typ == itemAssign, next.typ == itemDeclare:
|
||||
pipe.IsAssign = next.typ == itemAssign
|
||||
t.nextNonSpace()
|
||||
pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
|
||||
t.vars = append(t.vars, v.val)
|
||||
case next.typ == itemChar && next.val == ",":
|
||||
t.nextNonSpace()
|
||||
pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
|
||||
t.vars = append(t.vars, v.val)
|
||||
if context == "range" && len(pipe.Decl) < 2 {
|
||||
switch t.peekNonSpace().typ {
|
||||
case itemVariable, itemRightDelim, itemRightParen:
|
||||
// second initialized variable in a range pipeline
|
||||
goto decls
|
||||
default:
|
||||
t.errorf("range can only initialize variables")
|
||||
}
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
case tokenAfterVariable.typ == itemSpace:
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
default:
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case end:
|
||||
// At this point, the pipeline is complete
|
||||
t.checkPipeline(pipe, context)
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
|
||||
// Reject empty pipelines
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
// Only the first command of a pipeline can start with a non executable operand
|
||||
for i, c := range pipe.Cmds[1:] {
|
||||
switch c.Args[0].Type() {
|
||||
case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
|
||||
// With A|B|C, pipeline stage 2 is B
|
||||
t.errorf("non executable command in pipeline stage %d", i+2)
|
||||
}
|
||||
}
|
||||
}
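// Concretely, a literal may begin a pipeline but not a later stage. Given a
// hypothetical funcs map that defines "printf", the first call parses while the
// second fails with "non executable command in pipeline stage 2":
//
//	funcs := map[string]any{"printf": fmt.Sprintf}
//	_, err1 := Parse("ok", `{{12 | printf "%d"}}`, "", "", funcs)  // err1 == nil
//	_, err2 := Parse("bad", `{{printf | 12}}`, "", "", funcs)      // err2 != nil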
|
||||
|
||||
func (t *Tree) parseControl(context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
pipe = t.pipeline(context, itemRightDelim)
|
||||
if context == "range" {
|
||||
t.rangeDepth++
|
||||
}
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
if context == "range" {
|
||||
t.rangeDepth--
|
||||
}
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
// Special case for "else if" and "else with".
|
||||
// If the "else" is followed immediately by an "if" or "with",
|
||||
// the elseControl will have left the "if" or "with" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// {{with a}}_{{else with b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}
|
||||
// {{with a}}_{{else}}{{with b}}_{{end}}{{end}}.
|
||||
// To do this, parse the "if" or "with" as usual and stop at its {{end}};
// the subsequent {{end}} is assumed. This technique works even for long if-else-if chains.
|
||||
if context == "if" && t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
} else if context == "with" && t.peek().typ == itemWith {
|
||||
t.next()
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.withControl())
|
||||
} else {
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pipe.Position(), pipe.Line, pipe, list, elseList
|
||||
}
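// The rewrite described in the comment above is observable in the parsed tree:
// a chained form such as
//
//	{{if .X}}a{{else if .Y}}b{{end}}
//
// produces the same tree as the explicitly nested form
//
//	{{if .X}}a{{else}}{{if .Y}}b{{end}}{{end}}
//
// which is exactly what the "if with else if" case in parseTests asserts.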
|
||||
|
||||
// If:
|
||||
//
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
//
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl("if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
//
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
//
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
r := t.newRange(t.parseControl("range"))
|
||||
return r
|
||||
}
|
||||
|
||||
// With:
|
||||
//
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
//
|
||||
// With keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl("with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
//
|
||||
// {{end}}
|
||||
//
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
//
|
||||
// {{else}}
|
||||
//
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
peek := t.peekNonSpace()
|
||||
// The "{{else if ... " and "{{else with ..." will be
|
||||
// treated as "{{else}}{{if ..." and "{{else}}{{with ...".
|
||||
// So return the else node here.
|
||||
if peek.typ == itemIf || peek.typ == itemWith {
|
||||
return t.newElse(peek.pos, peek.line)
|
||||
}
|
||||
token := t.expect(itemRightDelim, "else")
|
||||
return t.newElse(token.pos, token.line)
|
||||
}
|
||||
|
||||
// Block:
|
||||
//
|
||||
// {{block stringValue pipeline}}
|
||||
//
|
||||
// Block keyword is past.
|
||||
// The name must be something that can evaluate to a string.
|
||||
// The pipeline is mandatory.
|
||||
func (t *Tree) blockControl() Node {
|
||||
const context = "block clause"
|
||||
|
||||
token := t.nextNonSpace()
|
||||
name := t.parseTemplateName(token, context)
|
||||
pipe := t.pipeline(context, itemRightDelim)
|
||||
|
||||
block := New(name) // name is already known here.
|
||||
block.text = t.text
|
||||
block.Mode = t.Mode
|
||||
block.ParseName = t.ParseName
|
||||
block.startParse(t.funcs, t.lex, t.treeSet)
|
||||
var end Node
|
||||
block.Root, end = block.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
block.add()
|
||||
block.stopParse()
|
||||
|
||||
return t.newTemplate(token.pos, token.line, name, pipe)
|
||||
}
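// In effect, {{block "name" pipeline}}body{{end}} behaves like a {{define}} of the
// body followed by {{template "name" pipeline}}. For example (see TestBlock in
// parse_test.go), the input
//
//	a{{block "inner" .}}bar{{.}}baz{{end}}b
//
// leaves `a{{template "inner" .}}b` in the outer tree and `bar{{.}}baz` in
// treeSet["inner"].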
|
||||
|
||||
// Template:
|
||||
//
|
||||
// {{template stringValue pipeline}}
|
||||
//
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
const context = "template clause"
|
||||
token := t.nextNonSpace()
|
||||
name := t.parseTemplateName(token, context)
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline(context, itemRightDelim)
|
||||
}
|
||||
return t.newTemplate(token.pos, token.line, name, pipe)
|
||||
}
|
||||
|
||||
func (t *Tree) parseTemplateName(token item, context string) (name string) {
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// command:
|
||||
//
|
||||
// operand (space operand)*
|
||||
//
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// we consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
// nothing here; break loop below
|
||||
default:
|
||||
t.unexpected(token, "operand")
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
//
|
||||
// term .Field*
|
||||
//
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// Obvious parsing errors involving literal values are detected here.
|
||||
// More complex error cases will have to be handled at execution time.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
|
||||
t.errorf("unexpected . after term %q", node.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
//
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
//
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemIdentifier:
|
||||
checkFunc := t.Mode&SkipFuncCheck == 0
|
||||
if checkFunc && !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
return t.pipeline("parenthesized pipeline", itemRightParen)
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
||||
718
src/text/template/parse/parse_test.go
Normal file
@@ -0,0 +1,718 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var debug = flag.Bool("debug", false, "show the errors produced by the main tests")
|
||||
|
||||
type numberTest struct {
|
||||
text string
|
||||
isInt bool
|
||||
isUint bool
|
||||
isFloat bool
|
||||
isComplex bool
|
||||
int64
|
||||
uint64
|
||||
float64
|
||||
complex128
|
||||
}
|
||||
|
||||
var numberTests = []numberTest{
|
||||
// basics
|
||||
{"0", true, true, true, false, 0, 0, 0, 0},
|
||||
{"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
|
||||
{"73", true, true, true, false, 73, 73, 73, 0},
|
||||
{"7_3", true, true, true, false, 73, 73, 73, 0},
|
||||
{"0b10_010_01", true, true, true, false, 73, 73, 73, 0},
|
||||
{"0B10_010_01", true, true, true, false, 73, 73, 73, 0},
|
||||
{"073", true, true, true, false, 073, 073, 073, 0},
|
||||
{"0o73", true, true, true, false, 073, 073, 073, 0},
|
||||
{"0O73", true, true, true, false, 073, 073, 073, 0},
|
||||
{"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
|
||||
{"0X73", true, true, true, false, 0x73, 0x73, 0x73, 0},
|
||||
{"0x7_3", true, true, true, false, 0x73, 0x73, 0x73, 0},
|
||||
{"-73", true, false, true, false, -73, 0, -73, 0},
|
||||
{"+73", true, false, true, false, 73, 0, 73, 0},
|
||||
{"100", true, true, true, false, 100, 100, 100, 0},
|
||||
{"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
|
||||
{"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
|
||||
{"-1.2", false, false, true, false, 0, 0, -1.2, 0},
|
||||
{"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
|
||||
{"1e1_9", false, true, true, false, 0, 1e19, 1e19, 0},
|
||||
{"1E19", false, true, true, false, 0, 1e19, 1e19, 0},
|
||||
{"-1e19", false, false, true, false, 0, 0, -1e19, 0},
|
||||
{"0x_1p4", true, true, true, false, 16, 16, 16, 0},
|
||||
{"0X_1P4", true, true, true, false, 16, 16, 16, 0},
|
||||
{"0x_1p-4", false, false, true, false, 0, 0, 1 / 16., 0},
|
||||
{"4i", false, false, false, true, 0, 0, 0, 4i},
|
||||
{"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
|
||||
{"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
|
||||
// complex with 0 imaginary are float (and maybe integer)
|
||||
{"0i", true, true, true, true, 0, 0, 0, 0},
|
||||
{"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
|
||||
{"-12+0i", true, false, true, true, -12, 0, -12, -12},
|
||||
{"13+0i", true, true, true, true, 13, 13, 13, 13},
|
||||
// funny bases
|
||||
{"0123", true, true, true, false, 0123, 0123, 0123, 0},
|
||||
{"-0x0", true, true, true, false, 0, 0, 0, 0},
|
||||
{"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
|
||||
// character constants
|
||||
{`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
|
||||
{`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
|
||||
{`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
|
||||
{`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
|
||||
{`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
|
||||
{`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
|
||||
{`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
|
||||
{`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
|
||||
// some broken syntax
|
||||
{text: "+-2"},
|
||||
{text: "0x123."},
|
||||
{text: "1e."},
|
||||
{text: "0xi."},
|
||||
{text: "1+2."},
|
||||
{text: "'x"},
|
||||
{text: "'xx'"},
|
||||
{text: "'433937734937734969526500969526500'"}, // Integer too large - issue 10634.
|
||||
// Issue 8622 - 0xe parsed as floating point. Very embarrassing.
|
||||
{"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0},
|
||||
}
|
||||
|
||||
func TestNumberParse(t *testing.T) {
|
||||
for _, test := range numberTests {
|
||||
// If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
|
||||
// because imaginary comes out as a number.
|
||||
var c complex128
|
||||
typ := itemNumber
|
||||
var tree *Tree
|
||||
if test.text[0] == '\'' {
|
||||
typ = itemCharConstant
|
||||
} else {
|
||||
_, err := fmt.Sscan(test.text, &c)
|
||||
if err == nil {
|
||||
typ = itemComplex
|
||||
}
|
||||
}
|
||||
n, err := tree.newNumber(0, test.text, typ)
|
||||
ok := test.isInt || test.isUint || test.isFloat || test.isComplex
|
||||
if ok && err != nil {
|
||||
t.Errorf("unexpected error for %q: %s", test.text, err)
|
||||
continue
|
||||
}
|
||||
if !ok && err == nil {
|
||||
t.Errorf("expected error for %q", test.text)
|
||||
continue
|
||||
}
|
||||
if !ok {
|
||||
if *debug {
|
||||
fmt.Printf("%s\n\t%s\n", test.text, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if n.IsComplex != test.isComplex {
|
||||
t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
|
||||
}
|
||||
if test.isInt {
|
||||
if !n.IsInt {
|
||||
t.Errorf("expected integer for %q", test.text)
|
||||
}
|
||||
if n.Int64 != test.int64 {
|
||||
t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
|
||||
}
|
||||
} else if n.IsInt {
|
||||
t.Errorf("did not expect integer for %q", test.text)
|
||||
}
|
||||
if test.isUint {
|
||||
if !n.IsUint {
|
||||
t.Errorf("expected unsigned integer for %q", test.text)
|
||||
}
|
||||
if n.Uint64 != test.uint64 {
|
||||
t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
|
||||
}
|
||||
} else if n.IsUint {
|
||||
t.Errorf("did not expect unsigned integer for %q", test.text)
|
||||
}
|
||||
if test.isFloat {
|
||||
if !n.IsFloat {
|
||||
t.Errorf("expected float for %q", test.text)
|
||||
}
|
||||
if n.Float64 != test.float64 {
|
||||
t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
|
||||
}
|
||||
} else if n.IsFloat {
|
||||
t.Errorf("did not expect float for %q", test.text)
|
||||
}
|
||||
if test.isComplex {
|
||||
if !n.IsComplex {
|
||||
t.Errorf("expected complex for %q", test.text)
|
||||
}
|
||||
if n.Complex128 != test.complex128 {
|
||||
t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
|
||||
}
|
||||
} else if n.IsComplex {
|
||||
t.Errorf("did not expect complex for %q", test.text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type parseTest struct {
|
||||
name string
|
||||
input string
|
||||
ok bool
|
||||
result string // what the user would see in an error message.
|
||||
}
|
||||
|
||||
const (
|
||||
noError = true
|
||||
hasError = false
|
||||
)
|
||||
|
||||
var parseTests = []parseTest{
|
||||
{"empty", "", noError,
|
||||
``},
|
||||
{"comment", "{{/*\n\n\n*/}}", noError,
|
||||
``},
|
||||
{"spaces", " \t\n", noError,
|
||||
`" \t\n"`},
|
||||
{"text", "some text", noError,
|
||||
`"some text"`},
|
||||
{"emptyAction", "{{}}", hasError,
|
||||
`{{}}`},
|
||||
{"field", "{{.X}}", noError,
|
||||
`{{.X}}`},
|
||||
{"simple command", "{{printf}}", noError,
|
||||
`{{printf}}`},
|
||||
{"$ invocation", "{{$}}", noError,
|
||||
"{{$}}"},
|
||||
{"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
|
||||
"{{with $x := 3}}{{$x 23}}{{end}}"},
|
||||
{"variable with fields", "{{$.I}}", noError,
|
||||
"{{$.I}}"},
|
||||
{"multi-word command", "{{printf `%d` 23}}", noError,
|
||||
"{{printf `%d` 23}}"},
|
||||
{"pipeline", "{{.X|.Y}}", noError,
|
||||
`{{.X | .Y}}`},
|
||||
{"pipeline with decl", "{{$x := .X|.Y}}", noError,
|
||||
`{{$x := .X | .Y}}`},
|
||||
{"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError,
|
||||
`{{.X (.Y .Z) (.A | .B .C) (.E)}}`},
|
||||
{"field applied to parentheses", "{{(.Y .Z).Field}}", noError,
|
||||
`{{(.Y .Z).Field}}`},
|
||||
{"simple if", "{{if .X}}hello{{end}}", noError,
|
||||
`{{if .X}}"hello"{{end}}`},
|
||||
{"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
|
||||
`{{if .X}}"true"{{else}}"false"{{end}}`},
|
||||
{"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError,
|
||||
`{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`},
|
||||
{"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError,
|
||||
`"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`},
|
||||
{"simple range", "{{range .X}}hello{{end}}", noError,
|
||||
`{{range .X}}"hello"{{end}}`},
|
||||
{"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
|
||||
`{{range .X.Y.Z}}"hello"{{end}}`},
|
||||
{"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
|
||||
`{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
|
||||
{"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
|
||||
`{{range .X}}"true"{{else}}"false"{{end}}`},
|
||||
{"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
|
||||
`{{range .X | .M}}"true"{{else}}"false"{{end}}`},
|
||||
{"range []int", "{{range .SI}}{{.}}{{end}}", noError,
|
||||
`{{range .SI}}{{.}}{{end}}`},
|
||||
{"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError,
|
||||
`{{range $x := .SI}}{{.}}{{end}}`},
|
||||
{"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
|
||||
`{{range $x, $y := .SI}}{{.}}{{end}}`},
|
||||
{"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
|
||||
`{{range .SI}}{{.}}{{break}}{{end}}`},
|
||||
{"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
|
||||
`{{range .SI}}{{.}}{{continue}}{{end}}`},
|
||||
{"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
|
||||
`{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
|
||||
{"template", "{{template `x`}}", noError,
|
||||
`{{template "x"}}`},
|
||||
{"template with arg", "{{template `x` .Y}}", noError,
|
||||
`{{template "x" .Y}}`},
|
||||
{"with", "{{with .X}}hello{{end}}", noError,
|
||||
`{{with .X}}"hello"{{end}}`},
|
||||
{"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
|
||||
`{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
|
||||
{"with with else with", "{{with .X}}hello{{else with .Y}}goodbye{{end}}", noError,
|
||||
`{{with .X}}"hello"{{else}}{{with .Y}}"goodbye"{{end}}{{end}}`},
|
||||
{"with else chain", "{{with .X}}X{{else with .Y}}Y{{else with .Z}}Z{{end}}", noError,
|
||||
`{{with .X}}"X"{{else}}{{with .Y}}"Y"{{else}}{{with .Z}}"Z"{{end}}{{end}}{{end}}`},
|
||||
// Trimming spaces.
|
||||
{"trim left", "x \r\n\t{{- 3}}", noError, `"x"{{3}}`},
|
||||
{"trim right", "{{3 -}}\n\n\ty", noError, `{{3}}"y"`},
|
||||
{"trim left and right", "x \r\n\t{{- 3 -}}\n\n\ty", noError, `"x"{{3}}"y"`},
|
||||
{"trim with extra spaces", "x\n{{- 3 -}}\ny", noError, `"x"{{3}}"y"`},
|
||||
{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"`},
|
||||
{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `"y"`},
|
||||
{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x""y"`},
|
||||
{"block definition", `{{block "foo" .}}hello{{end}}`, noError,
|
||||
`{{template "foo" .}}`},
|
||||
|
||||
{"newline in assignment", "{{ $x \n := \n 1 \n }}", noError, "{{$x := 1}}"},
|
||||
{"newline in empty action", "{{\n}}", hasError, "{{\n}}"},
|
||||
{"newline in pipeline", "{{\n\"x\"\n|\nprintf\n}}", noError, `{{"x" | printf}}`},
|
||||
{"newline in comment", "{{/*\nhello\n*/}}", noError, ""},
|
||||
{"newline in comment", "{{-\n/*\nhello\n*/\n-}}", noError, ""},
|
||||
{"spaces around continue", "{{range .SI}}{{.}}{{ continue }}{{end}}", noError,
|
||||
`{{range .SI}}{{.}}{{continue}}{{end}}`},
|
||||
{"spaces around break", "{{range .SI}}{{.}}{{ break }}{{end}}", noError,
|
||||
`{{range .SI}}{{.}}{{break}}{{end}}`},
|
||||
|
||||
// Errors.
|
||||
{"unclosed action", "hello{{range", hasError, ""},
|
||||
{"unmatched end", "{{end}}", hasError, ""},
|
||||
{"unmatched else", "{{else}}", hasError, ""},
|
||||
{"unmatched else after if", "{{if .X}}hello{{end}}{{else}}", hasError, ""},
|
||||
{"multiple else", "{{if .X}}1{{else}}2{{else}}3{{end}}", hasError, ""},
|
||||
{"missing end", "hello{{range .x}}", hasError, ""},
|
||||
{"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
|
||||
{"undefined function", "hello{{undefined}}", hasError, ""},
|
||||
{"undefined variable", "{{$x}}", hasError, ""},
|
||||
{"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
|
||||
{"variable undefined in template", "{{template $v}}", hasError, ""},
|
||||
{"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
|
||||
{"template with field ref", "{{template .X}}", hasError, ""},
|
||||
{"template with var", "{{template $v}}", hasError, ""},
|
||||
{"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
|
||||
{"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
|
||||
{"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
|
||||
{"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""},
|
||||
{"adjacent args", "{{printf 3`x`}}", hasError, ""},
|
||||
{"adjacent args with .", "{{printf `x`.}}", hasError, ""},
|
||||
{"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
|
||||
{"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
|
||||
{"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
|
||||
{"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
|
||||
{"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
|
||||
// Other kinds of assignments and operators aren't available yet.
|
||||
{"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
|
||||
{"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
|
||||
{"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""},
|
||||
{"bug0d", "{{$x % 3}}{{$x}}", hasError, ""},
|
||||
// Check the parse fails for := rather than comma.
|
||||
{"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""},
|
||||
// Another bug: variable read must ignore following punctuation.
|
||||
{"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""}, // ! is just illegal here.
|
||||
{"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""}, // $x+2 should not parse as ($x) (+2).
|
||||
{"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space.
|
||||
// Check the range handles assignment vs. declaration properly.
|
||||
{"bug2a", "{{range $x := 0}}{{$x}}{{end}}", noError, "{{range $x := 0}}{{$x}}{{end}}"},
|
||||
{"bug2b", "{{range $x = 0}}{{$x}}{{end}}", noError, "{{range $x = 0}}{{$x}}{{end}}"},
|
||||
// dot following a literal value
|
||||
{"dot after integer", "{{1.E}}", hasError, ""},
|
||||
{"dot after float", "{{0.1.E}}", hasError, ""},
|
||||
{"dot after boolean", "{{true.E}}", hasError, ""},
|
||||
{"dot after char", "{{'a'.any}}", hasError, ""},
|
||||
{"dot after string", `{{"hello".guys}}`, hasError, ""},
|
||||
{"dot after dot", "{{..E}}", hasError, ""},
|
||||
{"dot after nil", "{{nil.E}}", hasError, ""},
|
||||
// Wrong pipeline
|
||||
{"wrong pipeline dot", "{{12|.}}", hasError, ""},
|
||||
{"wrong pipeline number", "{{.|12|printf}}", hasError, ""},
|
||||
{"wrong pipeline string", "{{.|printf|\"error\"}}", hasError, ""},
|
||||
{"wrong pipeline char", "{{12|printf|'e'}}", hasError, ""},
|
||||
{"wrong pipeline boolean", "{{.|true}}", hasError, ""},
|
||||
{"wrong pipeline nil", "{{'c'|nil}}", hasError, ""},
|
||||
{"empty pipeline", `{{printf "%d" ( ) }}`, hasError, ""},
|
||||
// Missing pipeline in block
|
||||
{"block definition", `{{block "foo"}}hello{{end}}`, hasError, ""},
|
||||
}
|
||||
|
||||
var builtins = map[string]any{
|
||||
"printf": fmt.Sprintf,
|
||||
"contains": strings.Contains,
|
||||
}
|
||||
|
||||
func testParse(doCopy bool, t *testing.T) {
|
||||
textFormat = "%q"
|
||||
defer func() { textFormat = "%s" }()
|
||||
for _, test := range parseTests {
|
||||
tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins)
|
||||
switch {
|
||||
case err == nil && !test.ok:
|
||||
t.Errorf("%q: expected error; got none", test.name)
|
||||
continue
|
||||
case err != nil && test.ok:
|
||||
t.Errorf("%q: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
case err != nil && !test.ok:
|
||||
// expected error, got one
|
||||
if *debug {
|
||||
fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
var result string
|
||||
if doCopy {
|
||||
result = tmpl.Root.Copy().String()
|
||||
} else {
|
||||
result = tmpl.Root.String()
|
||||
}
|
||||
if result != test.result {
|
||||
t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
testParse(false, t)
|
||||
}
|
||||
|
||||
// Same as TestParse, but we copy the node first
|
||||
func TestParseCopy(t *testing.T) {
|
||||
testParse(true, t)
|
||||
}
|
||||
|
||||
func TestParseWithComments(t *testing.T) {
|
||||
textFormat = "%q"
|
||||
defer func() { textFormat = "%s" }()
|
||||
tests := [...]parseTest{
|
||||
{"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"},
|
||||
{"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`},
|
||||
{"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`},
|
||||
{"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
tr := New(test.name)
|
||||
tr.Mode = ParseComments
|
||||
tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree))
|
||||
if err != nil {
|
||||
t.Errorf("%q: expected error; got none", test.name)
|
||||
}
|
||||
if result := tmpl.Root.String(); result != test.result {
|
||||
t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeywordsAndFuncs(t *testing.T) {
|
||||
// Check collisions between functions and new keywords like 'break'. When a
|
||||
// break function is provided, the parser should treat 'break' as a function,
|
||||
// not a keyword.
|
||||
textFormat = "%q"
|
||||
defer func() { textFormat = "%s" }()
|
||||
|
||||
inp := `{{range .X}}{{break 20}}{{end}}`
|
||||
{
|
||||
// 'break' is a defined function, don't treat it as a keyword: it should
|
||||
// accept an argument successfully.
|
||||
var funcsWithKeywordFunc = map[string]any{
|
||||
"break": func(in any) any { return in },
|
||||
}
|
||||
tmpl, err := New("").Parse(inp, "", "", make(map[string]*Tree), funcsWithKeywordFunc)
|
||||
if err != nil || tmpl == nil {
|
||||
t.Errorf("with break func: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// No function called 'break'; treat it as a keyword. Results in a parse
|
||||
// error.
|
||||
tmpl, err := New("").Parse(inp, "", "", make(map[string]*Tree), make(map[string]any))
|
||||
if err == nil || tmpl != nil {
|
||||
t.Errorf("without break func: expected error; got none")
|
||||
}
|
||||
}
|
||||
}
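// The same collision is visible through the public text/template API (a sketch that
// assumes only the standard text/template package; without a registered "break"
// function, {{break 20}} would instead be a parse error):
//
//	funcs := template.FuncMap{"break": func(v any) any { return v }}
//	t := template.Must(template.New("t").Funcs(funcs).Parse(`{{range .}}{{break 20}}{{end}}`))
//	_ = t.Execute(io.Discard, []int{1})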
|
||||
|
||||
func TestSkipFuncCheck(t *testing.T) {
|
||||
oldTextFormat := textFormat
|
||||
textFormat = "%q"
|
||||
defer func() { textFormat = oldTextFormat }()
|
||||
tr := New("skip func check")
|
||||
tr.Mode = SkipFuncCheck
|
||||
tmpl, err := tr.Parse("{{fn 1 2}}", "", "", make(map[string]*Tree))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
expected := "{{fn 1 2}}"
|
||||
if result := tmpl.Root.String(); result != expected {
|
||||
t.Errorf("got\n\t%v\nexpected\n\t%v", result, expected)
|
||||
}
|
||||
}
|
||||
|
||||
type isEmptyTest struct {
|
||||
name string
|
||||
input string
|
||||
empty bool
|
||||
}
|
||||
|
||||
var isEmptyTests = []isEmptyTest{
|
||||
{"empty", ``, true},
|
||||
{"nonempty", `hello`, false},
|
||||
{"spaces only", " \t\n \t\n", true},
|
||||
{"comment only", "{{/* comment */}}", true},
|
||||
{"definition", `{{define "x"}}something{{end}}`, true},
|
||||
{"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
|
||||
{"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
|
||||
{"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false},
|
||||
}
|
||||
|
||||
func TestIsEmpty(t *testing.T) {
|
||||
if !IsEmptyTree(nil) {
|
||||
t.Errorf("nil tree is not empty")
|
||||
}
|
||||
for _, test := range isEmptyTests {
|
||||
tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil)
|
||||
if err != nil {
|
||||
t.Errorf("%q: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
if empty := IsEmptyTree(tree.Root); empty != test.empty {
|
||||
t.Errorf("%q: expected %t got %t", test.name, test.empty, empty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorContextWithTreeCopy(t *testing.T) {
|
||||
tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected tree parse failure: %v", err)
|
||||
}
|
||||
treeCopy := tree.Copy()
|
||||
wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0])
|
||||
gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0])
|
||||
if wantLocation != gotLocation {
|
||||
t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation)
|
||||
}
|
||||
if wantContext != gotContext {
|
||||
t.Errorf("wrong error location want %q got %q", wantContext, gotContext)
|
||||
}
|
||||
}
|
||||
|
||||
// All failures, and the result is a string that must appear in the error message.
|
||||
var errorTests = []parseTest{
|
||||
// Check line numbers are accurate.
|
||||
{"unclosed1",
|
||||
"line1\n{{",
|
||||
hasError, `unclosed1:2: unclosed action`},
|
||||
{"unclosed2",
|
||||
"line1\n{{define `x`}}line2\n{{",
|
||||
hasError, `unclosed2:3: unclosed action`},
|
||||
{"unclosed3",
|
||||
"line1\n{{\"x\"\n\"y\"\n",
|
||||
hasError, `unclosed3:4: unclosed action started at unclosed3:2`},
|
||||
{"unclosed4",
|
||||
"{{\n\n\n\n\n",
|
||||
hasError, `unclosed4:6: unclosed action started at unclosed4:1`},
|
||||
{"var1",
|
||||
"line1\n{{\nx\n}}",
|
||||
hasError, `var1:3: function "x" not defined`},
|
||||
// Specific errors.
|
||||
{"function",
|
||||
"{{foo}}",
|
||||
hasError, `function "foo" not defined`},
|
||||
{"comment1",
|
||||
"{{/*}}",
|
||||
hasError, `comment1:1: unclosed comment`},
|
||||
{"comment2",
|
||||
"{{/*\nhello\n}}",
|
||||
hasError, `comment2:1: unclosed comment`},
|
||||
{"lparen",
|
||||
"{{.X (1 2 3}}",
|
||||
hasError, `unclosed left paren`},
|
||||
{"rparen",
|
||||
"{{.X 1 2 3 ) }}",
|
||||
hasError, "unexpected right paren"},
|
||||
{"rparen2",
|
||||
"{{(.X 1 2 3",
|
||||
hasError, `unclosed action`},
|
||||
{"space",
|
||||
"{{`x`3}}",
|
||||
hasError, `in operand`},
|
||||
{"idchar",
|
||||
"{{a#}}",
|
||||
hasError, `'#'`},
|
||||
{"charconst",
|
||||
"{{'a}}",
|
||||
hasError, `unterminated character constant`},
|
||||
{"stringconst",
|
||||
`{{"a}}`,
|
||||
hasError, `unterminated quoted string`},
|
||||
{"rawstringconst",
|
||||
"{{`a}}",
|
||||
hasError, `unterminated raw quoted string`},
|
||||
{"number",
|
||||
"{{0xi}}",
|
||||
hasError, `number syntax`},
|
||||
{"multidefine",
|
||||
"{{define `a`}}a{{end}}{{define `a`}}b{{end}}",
|
||||
hasError, `multiple definition of template`},
|
||||
{"eof",
|
||||
"{{range .X}}",
|
||||
hasError, `unexpected EOF`},
|
||||
{"variable",
|
||||
// Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration.
|
||||
"{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}",
|
||||
hasError, `unexpected ":="`},
|
||||
{"multidecl",
|
||||
"{{$a,$b,$c := 23}}",
|
||||
hasError, `too many declarations`},
|
||||
{"undefvar",
|
||||
"{{$a}}",
|
||||
hasError, `undefined variable`},
|
||||
{"wrongdot",
|
||||
"{{true.any}}",
|
||||
hasError, `unexpected . after term`},
|
||||
{"wrongpipeline",
|
||||
"{{12|false}}",
|
||||
hasError, `non executable command in pipeline`},
|
||||
{"emptypipeline",
|
||||
`{{ ( ) }}`,
|
||||
hasError, `missing value for parenthesized pipeline`},
|
||||
{"multilinerawstring",
|
||||
"{{ $v := `\n` }} {{",
|
||||
hasError, `multilinerawstring:2: unclosed action`},
|
||||
{"rangeundefvar",
|
||||
"{{range $k}}{{end}}",
|
||||
hasError, `undefined variable`},
|
||||
{"rangeundefvars",
|
||||
"{{range $k, $v}}{{end}}",
|
||||
hasError, `undefined variable`},
|
||||
{"rangemissingvalue1",
|
||||
"{{range $k,}}{{end}}",
|
||||
hasError, `missing value for range`},
|
||||
{"rangemissingvalue2",
|
||||
"{{range $k, $v := }}{{end}}",
|
||||
hasError, `missing value for range`},
|
||||
{"rangenotvariable1",
|
||||
"{{range $k, .}}{{end}}",
|
||||
hasError, `range can only initialize variables`},
|
||||
{"rangenotvariable2",
|
||||
"{{range $k, 123 := .}}{{end}}",
|
||||
hasError, `range can only initialize variables`},
|
||||
}
|
||||
|
||||
func TestErrors(t *testing.T) {
|
||||
for _, test := range errorTests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree))
|
||||
if err == nil {
|
||||
t.Fatalf("expected error %q, got nil", test.result)
|
||||
}
|
||||
if !strings.Contains(err.Error(), test.result) {
|
||||
t.Fatalf("error %q does not contain %q", err, test.result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlock(t *testing.T) {
|
||||
const (
|
||||
input = `a{{block "inner" .}}bar{{.}}baz{{end}}b`
|
||||
outer = `a{{template "inner" .}}b`
|
||||
inner = `bar{{.}}baz`
|
||||
)
|
||||
treeSet := make(map[string]*Tree)
|
||||
tmpl, err := New("outer").Parse(input, "", "", treeSet, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if g, w := tmpl.Root.String(), outer; g != w {
|
||||
t.Errorf("outer template = %q, want %q", g, w)
|
||||
}
|
||||
inTmpl := treeSet["inner"]
|
||||
if inTmpl == nil {
|
||||
t.Fatal("block did not define template")
|
||||
}
|
||||
if g, w := inTmpl.Root.String(), inner; g != w {
|
||||
t.Errorf("inner template = %q, want %q", g, w)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineNum(t *testing.T) {
|
||||
// const count = 100
|
||||
const count = 3
|
||||
text := strings.Repeat("{{printf 1234}}\n", count)
|
||||
tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Check the line numbers. Each line is an action containing a template, followed by text.
|
||||
// That's two nodes per line.
|
||||
nodes := tree.Root.Nodes
|
||||
for i := 0; i < len(nodes); i += 2 {
|
||||
line := 1 + i/2
|
||||
// Action first.
|
||||
action := nodes[i].(*ActionNode)
|
||||
if action.Line != line {
|
||||
t.Errorf("line %d: action is line %d", line, action.Line)
|
||||
}
|
||||
pipe := action.Pipe
|
||||
if pipe.Line != line {
|
||||
t.Errorf("line %d: pipe is line %d", line, pipe.Line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseLarge(b *testing.B) {
|
||||
text := strings.Repeat("{{1234}}\n", 10000)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var sinkv, sinkl string
|
||||
|
||||
func BenchmarkVariableString(b *testing.B) {
|
||||
v := &VariableNode{
|
||||
Ident: []string{"$", "A", "BB", "CCC", "THIS_IS_THE_VARIABLE_BEING_PROCESSED"},
|
||||
}
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
sinkv = v.String()
|
||||
}
|
||||
if sinkv == "" {
|
||||
b.Fatal("Benchmark was not run")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkListString(b *testing.B) {
|
||||
text := `
|
||||
{{(printf .Field1.Field2.Field3).Value}}
|
||||
{{$x := (printf .Field1.Field2.Field3).Value}}
|
||||
{{$y := (printf $x.Field1.Field2.Field3).Value}}
|
||||
{{$z := $y.Field1.Field2.Field3}}
|
||||
{{if contains $y $z}}
|
||||
{{printf "%q" $y}}
|
||||
{{else}}
|
||||
{{printf "%q" $x}}
|
||||
{{end}}
|
||||
{{with $z.Field1 | contains "boring"}}
|
||||
{{printf "%q" . | printf "%s"}}
|
||||
{{else}}
|
||||
{{printf "%d %d %d" 11 11 11}}
|
||||
{{printf "%d %d %s" 22 22 $x.Field1.Field2.Field3 | printf "%s"}}
|
||||
{{printf "%v" (contains $z.Field1.Field2 $y)}}
|
||||
{{end}}
|
||||
`
|
||||
tree, err := New("bench").Parse(text, "", "", make(map[string]*Tree), builtins)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
sinkl = tree.Root.String()
|
||||
}
|
||||
if sinkl == "" {
|
||||
b.Fatal("Benchmark was not run")
|
||||
}
|
||||
}
|
||||
238
src/text/template/template.go
Normal file
@@ -0,0 +1,238 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package template

import (
	"reflect"
	"sync"
	"text/template/parse"
)

// common holds the information shared by related templates.
type common struct {
	tmpl   map[string]*Template // Map from name to defined templates.
	muTmpl sync.RWMutex         // protects tmpl
	option option
	// We use two maps, one for parsing and one for execution.
	// This separation makes the API cleaner since it doesn't
	// expose reflection to the client.
	muFuncs    sync.RWMutex // protects parseFuncs and execFuncs
	parseFuncs FuncMap
	execFuncs  map[string]reflect.Value
}

// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by [html/template] and should be treated
// as unexported by all other clients.
type Template struct {
	name string
	*parse.Tree
	*common
	leftDelim  string
	rightDelim string
}

// New allocates a new, undefined template with the given name.
func New(name string) *Template {
	t := &Template{
		name: name,
	}
	t.init()
	return t
}

// Name returns the name of the template.
func (t *Template) Name() string {
	return t.name
}

// New allocates a new, undefined template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
//
// Because associated templates share underlying data, template construction
// cannot be done safely in parallel. Once the templates are constructed, they
// can be executed in parallel.
func (t *Template) New(name string) *Template {
	t.init()
	nt := &Template{
		name:       name,
		common:     t.common,
		leftDelim:  t.leftDelim,
		rightDelim: t.rightDelim,
	}
	return nt
}

// init guarantees that t has a valid common structure.
func (t *Template) init() {
	if t.common == nil {
		c := new(common)
		c.tmpl = make(map[string]*Template)
		c.parseFuncs = make(FuncMap)
		c.execFuncs = make(map[string]reflect.Value)
		t.common = c
	}
}

// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to [Template.Parse] in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
	nt := t.copy(nil)
	nt.init()
	if t.common == nil {
		return nt, nil
	}
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	for k, v := range t.tmpl {
		if k == t.name {
			nt.tmpl[t.name] = nt
			continue
		}
		// The associated templates share nt's common structure.
		tmpl := v.copy(nt.common)
		nt.tmpl[k] = tmpl
	}
	t.muFuncs.RLock()
	defer t.muFuncs.RUnlock()
	for k, v := range t.parseFuncs {
		nt.parseFuncs[k] = v
	}
	for k, v := range t.execFuncs {
		nt.execFuncs[k] = v
	}
	return nt, nil
}

// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
	return &Template{
		name:       t.name,
		Tree:       t.Tree,
		common:     c,
		leftDelim:  t.leftDelim,
		rightDelim: t.rightDelim,
	}
}

// AddParseTree associates the argument parse tree with the template t, giving
// it the specified name. If the template has not been defined, this tree becomes
// its definition. If it has been defined and already has that name, the existing
// definition is replaced; otherwise a new template is created, defined, and returned.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
	t.init()
	t.muTmpl.Lock()
	defer t.muTmpl.Unlock()
	nt := t
	if name != t.name {
		nt = t.New(name)
	}
	// Even if nt == t, we need to install it in the common.tmpl map.
	if t.associate(nt, tree) || nt.Tree == nil {
		nt.Tree = tree
	}
	return nt, nil
}

// Templates returns a slice of defined templates associated with t.
func (t *Template) Templates() []*Template {
	if t.common == nil {
		return nil
	}
	// Return a slice so we don't expose the map.
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	m := make([]*Template, 0, len(t.tmpl))
	for _, v := range t.tmpl {
		m = append(m, v)
	}
	return m
}

// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to [Template.Parse], [Template.ParseFiles], or [Template.ParseGlob]. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
	t.init()
	t.leftDelim = left
	t.rightDelim = right
	return t
}

// Funcs adds the elements of the argument map to the template's function map.
// It must be called before the template is parsed.
// It panics if a value in the map is not a function with appropriate return
// type or if the name cannot be used syntactically as a function in a template.
// It is legal to overwrite elements of the map. The return value is the template,
// so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
	t.init()
	t.muFuncs.Lock()
	defer t.muFuncs.Unlock()
	addValueFuncs(t.execFuncs, funcMap)
	addFuncs(t.parseFuncs, funcMap)
	return t
}

// Lookup returns the template with the given name that is associated with t.
// It returns nil if there is no such template or the template has no definition.
func (t *Template) Lookup(name string) *Template {
	if t.common == nil {
		return nil
	}
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	return t.tmpl[name]
}

// Parse parses text as a template body for t.
// Named template definitions ({{define ...}} or {{block ...}} statements) in text
// define additional templates associated with t and are removed from the
// definition of t itself.
//
// Templates can be redefined in successive calls to Parse.
// A template definition with a body containing only white space and comments
// is considered empty and will not replace an existing template's body.
// This allows using Parse to add new named template definitions without
// overwriting the main template body.
func (t *Template) Parse(text string) (*Template, error) {
	t.init()
	t.muFuncs.RLock()
	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins())
	t.muFuncs.RUnlock()
	if err != nil {
		return nil, err
	}
	// Add the newly parsed trees, including the one for t, into our common structure.
	for name, tree := range trees {
		if _, err := t.AddParseTree(name, tree); err != nil {
			return nil, err
		}
	}
	return t, nil
}

// associate installs the new template into the group of templates associated
// with t. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
func (t *Template) associate(new *Template, tree *parse.Tree) bool {
	if new.common != t.common {
		panic("internal error: associate not common")
	}
	if old := t.tmpl[new.name]; old != nil && parse.IsEmptyTree(tree.Root) && old.Tree != nil {
		// If a template by that name exists,
		// don't replace it with an empty template.
		return false
	}
	t.tmpl[new.name] = new
	return true
}
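The file above defines the template name space: New and init wire templates to a shared common structure, Parse turns {{define}} blocks into associated templates, Lookup retrieves them, and Clone copies the name space so variants can diverge. A minimal, hedged sketch of how these methods combine from the caller's side (the "shout" function and the template text are invented for illustration, not taken from this commit):

// Illustrative sketch: chain Funcs and Parse, look up an associated template,
// then clone the set and override one definition in the clone only.
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	base := template.New("page").Funcs(template.FuncMap{
		"shout": strings.ToUpper, // Funcs must run before Parse.
	})
	// The {{define}} block becomes the associated template "greet" and is
	// removed from the body of "page".
	base = template.Must(base.Parse(`{{template "greet" .}}{{define "greet"}}Hello, {{shout .}}!{{end}}`))

	if base.Lookup("greet") == nil {
		panic("greet not defined")
	}

	// Clone copies the name space; parsing a new "greet" here does not
	// touch the original, and the clone keeps the original "page" body.
	variant := template.Must(template.Must(base.Clone()).Parse(`{{define "greet"}}Hi, {{.}}.{{end}}`))

	if err := base.Execute(os.Stdout, "gopher"); err != nil { // Hello, GOPHER!
		panic(err)
	}
	if err := variant.Execute(os.Stdout, "gopher"); err != nil { // Hi, gopher.
		panic(err)
	}
}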
2
src/text/template/testdata/file1.tmpl
vendored
Normal file
@@ -0,0 +1,2 @@
{{define "x"}}TEXT{{end}}
{{define "dotV"}}{{.V}}{{end}}
2
src/text/template/testdata/file2.tmpl
vendored
Normal file
@@ -0,0 +1,2 @@
{{define "dot"}}{{.}}{{end}}
{{define "nested"}}{{template "dot" .}}{{end}}
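file1.tmpl and file2.tmpl only define named templates, so they are meant to be parsed into one set and invoked by name. A hedged sketch of one way to load them (ParseGlob and ExecuteTemplate are standard text/template API; the glob path, the data values, and the working directory are assumptions for the example):

// Illustrative sketch: load both file*.tmpl testdata files with a glob and
// execute two of the templates they define.
package main

import (
	"os"
	"text/template"
)

func main() {
	set, err := template.ParseGlob("testdata/file*.tmpl")
	if err != nil {
		panic(err)
	}
	// "nested" (file2) invokes "dot" (file2) with the passed value;
	// "dotV" (file1) prints the V field of its data.
	if err := set.ExecuteTemplate(os.Stdout, "nested", "hello"); err != nil {
		panic(err)
	}
	data := struct{ V string }{V: "value"}
	if err := set.ExecuteTemplate(os.Stdout, "dotV", data); err != nil {
		panic(err)
	}
}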
3
src/text/template/testdata/tmpl1.tmpl
vendored
Normal file
@@ -0,0 +1,3 @@
template1
{{define "x"}}x{{end}}
{{template "y"}}
3
src/text/template/testdata/tmpl2.tmpl
vendored
Normal file
@@ -0,0 +1,3 @@
template2
{{define "y"}}y{{end}}
{{template "x"}}
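tmpl1.tmpl and tmpl2.tmpl reference each other's definitions ({{template "y"}} and {{template "x"}}), so each executes correctly only after both files are parsed into the same associated set. A hedged sketch, assuming the files sit under testdata/ relative to the working directory and using only standard text/template API:

// Illustrative sketch: parse both files together so the cross-file
// {{template}} references resolve, then execute the first file's body.
package main

import (
	"os"
	"text/template"
)

func main() {
	// ParseFiles names the returned template after the first file's base name.
	set, err := template.ParseFiles(
		"testdata/tmpl1.tmpl",
		"testdata/tmpl2.tmpl",
	)
	if err != nil {
		panic(err)
	}
	// Executing tmpl1.tmpl reaches "y", which tmpl2.tmpl defined.
	if err := set.ExecuteTemplate(os.Stdout, "tmpl1.tmpl", nil); err != nil {
		panic(err)
	}
}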