remap.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package remap handles tracking the locations of Go tokens in a source text
// across a rewrite by the Go formatter.
package remap

import (
	"fmt"
	"go/scanner"
	"go/token"
)

// A Location represents a span of byte offsets in the source text.
type Location struct {
	Pos, End int // End is exclusive
}

// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location

// Find reports whether the specified span is recorded by m, and if so returns
// the new location it was mapped to. If the input span was not found, the
// returned location is the same as the input.
func (m Map) Find(pos, end int) (Location, bool) {
	key := Location{
		Pos: pos,
		End: end,
	}
	if loc, ok := m[key]; ok {
		return loc, true
	}
	return key, false
}
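
// exampleFind is a usage sketch added for illustration; it is not part of the
// original package, and the offsets below are made-up values. It shows that a
// recorded span comes back remapped, while an unrecorded span is returned
// unchanged with ok == false.
func exampleFind() {
	m := Map{
		{Pos: 0, End: 4}: {Pos: 1, End: 5}, // pretend a token shifted right by one byte
	}
	if loc, ok := m.Find(0, 4); ok {
		fmt.Printf("recorded span now at [%d, %d)\n", loc.Pos, loc.End) // [1, 5)
	}
	if loc, ok := m.Find(10, 14); !ok {
		fmt.Printf("unrecorded span unchanged: [%d, %d)\n", loc.Pos, loc.End) // [10, 14)
	}
}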

// add records a mapping from the input span [opos, oend) to the output span
// [npos, nend).
func (m Map) add(opos, oend, npos, nend int) {
	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}

// Compute constructs a location mapping from input to output. An error is
// reported if any of the tokens of output cannot be mapped.
func Compute(input, output []byte) (Map, error) {
	itok := tokenize(input)
	otok := tokenize(output)
	if len(itok) != len(otok) {
		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
	}
	m := make(Map)
	for i, ti := range itok {
		to := otok[i]
		if ti.Token != to.Token {
			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
		}
		m.add(ti.pos, ti.end, to.pos, to.end)
	}
	return m, nil
}
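
// exampleCompute is an illustrative sketch, not part of the original package:
// the two snippets are assumed inputs that differ only in whitespace, much as
// they would before and after a run of the Go formatter.
func exampleCompute() {
	input := []byte("package p\nfunc f() {  return  }\n")
	output := []byte("package p\n\nfunc f() { return }\n")

	m, err := Compute(input, output)
	if err != nil {
		panic(err)
	}
	// Every token span of input now maps to the corresponding span in output.
	fmt.Printf("mapped %d token spans\n", len(m))
}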

// tokinfo records the span and type of a source token.
type tokinfo struct {
	pos, end int
	token.Token
}

// tokenize scans src and returns the byte-offset span and type of each token,
// including comments. Semicolons are skipped: the scanner inserts them
// automatically at line ends, so they reflect layout rather than tokens that
// need to be remapped.
func tokenize(src []byte) []tokinfo {
	fs := token.NewFileSet()
	var s scanner.Scanner
	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
	var info []tokinfo
	for {
		pos, next, lit := s.Scan()
		switch next {
		case token.SEMICOLON:
			continue
		}
		info = append(info, tokinfo{
			// token.Pos values are 1-based within the file, so subtract 1 to
			// get 0-based byte offsets into src.
			pos:   int(pos - 1),
			end:   int(pos + token.Pos(len(lit)) - 1),
			Token: next,
		})
		if next == token.EOF {
			break
		}
	}
	return info
}
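
// exampleTokenize is an illustrative sketch, not part of the original package.
// It shows that tokenize reports zero-based byte offsets and that the
// semicolons the scanner inserts at line ends never appear in the result.
func exampleTokenize() {
	for _, t := range tokenize([]byte("package p\n")) {
		// Prints the package keyword at [0, 7), the identifier p at [8, 9),
		// and finally EOF; no SEMICOLON entry is emitted.
		fmt.Printf("%v [%d, %d)\n", t.Token, t.pos, t.end)
	}
}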